localrepo: use file API via vfs while ensuring repository directory...
FUJIWARA Katsunori
r17161:be016e96 default
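This commit replaces direct os.path / util filesystem calls with calls through the repository's vfs objects (self.vfs rooted at .hg, self.wvfs rooted at the working directory), so path joining happens inside the vfs. A minimal before/after sketch of the pattern, taken from the hunk below (anything not in the hunk is illustrative):

    # before: join paths by hand and call the os/util helpers
    if not os.path.isdir(self.path):
        util.makedir(self.path, notindexed=True)

    # after: the vfs is already rooted at .hg, so no explicit join is needed
    if not self.vfs.isdir():
        self.vfs.makedir(notindexed=True)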
@@ -1,2457 +1,2457 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 class localrepository(repo.repository):
27 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 28 'known', 'getbundle'))
29 29 supportedformats = set(('revlogv1', 'generaldelta'))
30 30 supported = supportedformats | set(('store', 'fncache', 'shared',
31 31 'dotencode'))
32 32 openerreqs = set(('revlogv1', 'generaldelta'))
33 33 requirements = ['revlogv1']
34 34
35 35 def _baserequirements(self, create):
36 36 return self.requirements[:]
37 37
38 38 def __init__(self, baseui, path=None, create=False):
39 39 repo.repository.__init__(self)
40 40 self.wopener = scmutil.opener(path, expand=True)
41 41 self.wvfs = self.wopener
42 42 self.root = self.wvfs.base
43 43 self.path = self.wvfs.join(".hg")
44 44 self.origroot = path
45 45 self.auditor = scmutil.pathauditor(self.root, self._checknested)
46 46 self.opener = scmutil.opener(self.path)
47 47 self.vfs = self.opener
48 48 self.baseui = baseui
49 49 self.ui = baseui.copy()
50 50 # A list of callbacks to shape the phase if no data were found.
51 51 # Callbacks are in the form: func(repo, roots) --> processed root.
52 52 # This list is to be filled by extensions during repo setup
53 53 self._phasedefaults = []
54 54
55 55 try:
56 56 self.ui.readconfig(self.join("hgrc"), self.root)
57 57 extensions.loadall(self.ui)
58 58 except IOError:
59 59 pass
60 60
61 if not os.path.isdir(self.path):
61 if not self.vfs.isdir():
62 62 if create:
63 if not os.path.exists(self.root):
64 util.makedirs(self.root)
65 util.makedir(self.path, notindexed=True)
63 if not self.wvfs.exists():
64 self.wvfs.makedirs()
65 self.vfs.makedir(notindexed=True)
66 66 requirements = self._baserequirements(create)
67 67 if self.ui.configbool('format', 'usestore', True):
68 os.mkdir(os.path.join(self.path, "store"))
68 self.vfs.mkdir("store")
69 69 requirements.append("store")
70 70 if self.ui.configbool('format', 'usefncache', True):
71 71 requirements.append("fncache")
72 72 if self.ui.configbool('format', 'dotencode', True):
73 73 requirements.append('dotencode')
74 74 # create an invalid changelog
75 75 self.vfs.append(
76 76 "00changelog.i",
77 77 '\0\0\0\2' # represents revlogv2
78 78 ' dummy changelog to prevent using the old repo layout'
79 79 )
80 80 if self.ui.configbool('format', 'generaldelta', False):
81 81 requirements.append("generaldelta")
82 82 requirements = set(requirements)
83 83 else:
84 84 raise error.RepoError(_("repository %s not found") % path)
85 85 elif create:
86 86 raise error.RepoError(_("repository %s already exists") % path)
87 87 else:
88 88 try:
89 89 requirements = scmutil.readrequires(self.vfs, self.supported)
90 90 except IOError, inst:
91 91 if inst.errno != errno.ENOENT:
92 92 raise
93 93 requirements = set()
94 94
95 95 self.sharedpath = self.path
96 96 try:
97 97 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
98 98 if not os.path.exists(s):
99 99 raise error.RepoError(
100 100 _('.hg/sharedpath points to nonexistent directory %s') % s)
101 101 self.sharedpath = s
102 102 except IOError, inst:
103 103 if inst.errno != errno.ENOENT:
104 104 raise
105 105
106 106 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
107 107 self.spath = self.store.path
108 108 self.sopener = self.store.opener
109 109 self.svfs = self.sopener
110 110 self.sjoin = self.store.join
111 111 self.opener.createmode = self.store.createmode
112 112 self._applyrequirements(requirements)
113 113 if create:
114 114 self._writerequirements()
115 115
116 116
117 117 self._branchcache = None
118 118 self._branchcachetip = None
119 119 self.filterpats = {}
120 120 self._datafilters = {}
121 121 self._transref = self._lockref = self._wlockref = None
122 122
123 123 # A cache for various files under .hg/ that tracks file changes,
124 124 # (used by the filecache decorator)
125 125 #
126 126 # Maps a property name to its util.filecacheentry
127 127 self._filecache = {}
128 128
129 129 def _applyrequirements(self, requirements):
130 130 self.requirements = requirements
131 131 self.sopener.options = dict((r, 1) for r in requirements
132 132 if r in self.openerreqs)
133 133
134 134 def _writerequirements(self):
135 135 reqfile = self.opener("requires", "w")
136 136 for r in self.requirements:
137 137 reqfile.write("%s\n" % r)
138 138 reqfile.close()
139 139
140 140 def _checknested(self, path):
141 141 """Determine if path is a legal nested repository."""
142 142 if not path.startswith(self.root):
143 143 return False
144 144 subpath = path[len(self.root) + 1:]
145 145 normsubpath = util.pconvert(subpath)
146 146
147 147 # XXX: Checking against the current working copy is wrong in
148 148 # the sense that it can reject things like
149 149 #
150 150 # $ hg cat -r 10 sub/x.txt
151 151 #
152 152 # if sub/ is no longer a subrepository in the working copy
153 153 # parent revision.
154 154 #
155 155 # However, it can of course also allow things that would have
156 156 # been rejected before, such as the above cat command if sub/
157 157 # is a subrepository now, but was a normal directory before.
158 158 # The old path auditor would have rejected by mistake since it
159 159 # panics when it sees sub/.hg/.
160 160 #
161 161 # All in all, checking against the working copy seems sensible
162 162 # since we want to prevent access to nested repositories on
163 163 # the filesystem *now*.
164 164 ctx = self[None]
165 165 parts = util.splitpath(subpath)
166 166 while parts:
167 167 prefix = '/'.join(parts)
168 168 if prefix in ctx.substate:
169 169 if prefix == normsubpath:
170 170 return True
171 171 else:
172 172 sub = ctx.sub(prefix)
173 173 return sub.checknested(subpath[len(prefix) + 1:])
174 174 else:
175 175 parts.pop()
176 176 return False
177 177
178 178 @filecache('bookmarks')
179 179 def _bookmarks(self):
180 180 return bookmarks.read(self)
181 181
182 182 @filecache('bookmarks.current')
183 183 def _bookmarkcurrent(self):
184 184 return bookmarks.readcurrent(self)
185 185
186 186 def _writebookmarks(self, marks):
187 187 bookmarks.write(self)
188 188
189 189 def bookmarkheads(self, bookmark):
190 190 name = bookmark.split('@', 1)[0]
191 191 heads = []
192 192 for mark, n in self._bookmarks.iteritems():
193 193 if mark.split('@', 1)[0] == name:
194 194 heads.append(n)
195 195 return heads
196 196
197 197 @storecache('phaseroots')
198 198 def _phasecache(self):
199 199 return phases.phasecache(self, self._phasedefaults)
200 200
201 201 @storecache('obsstore')
202 202 def obsstore(self):
203 203 store = obsolete.obsstore(self.sopener)
204 204 return store
205 205
206 206 @storecache('00changelog.i')
207 207 def changelog(self):
208 208 c = changelog.changelog(self.sopener)
209 209 if 'HG_PENDING' in os.environ:
210 210 p = os.environ['HG_PENDING']
211 211 if p.startswith(self.root):
212 212 c.readpending('00changelog.i.a')
213 213 return c
214 214
215 215 @storecache('00manifest.i')
216 216 def manifest(self):
217 217 return manifest.manifest(self.sopener)
218 218
219 219 @filecache('dirstate')
220 220 def dirstate(self):
221 221 warned = [0]
222 222 def validate(node):
223 223 try:
224 224 self.changelog.rev(node)
225 225 return node
226 226 except error.LookupError:
227 227 if not warned[0]:
228 228 warned[0] = True
229 229 self.ui.warn(_("warning: ignoring unknown"
230 230 " working parent %s!\n") % short(node))
231 231 return nullid
232 232
233 233 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
234 234
235 235 def __getitem__(self, changeid):
236 236 if changeid is None:
237 237 return context.workingctx(self)
238 238 return context.changectx(self, changeid)
239 239
240 240 def __contains__(self, changeid):
241 241 try:
242 242 return bool(self.lookup(changeid))
243 243 except error.RepoLookupError:
244 244 return False
245 245
246 246 def __nonzero__(self):
247 247 return True
248 248
249 249 def __len__(self):
250 250 return len(self.changelog)
251 251
252 252 def __iter__(self):
253 253 for i in xrange(len(self)):
254 254 yield i
255 255
256 256 def revs(self, expr, *args):
257 257 '''Return a list of revisions matching the given revset'''
258 258 expr = revset.formatspec(expr, *args)
259 259 m = revset.match(None, expr)
260 260 return [r for r in m(self, range(len(self)))]
261 261
262 262 def set(self, expr, *args):
263 263 '''
264 264 Yield a context for each matching revision, after doing arg
265 265 replacement via revset.formatspec
266 266 '''
267 267 for r in self.revs(expr, *args):
268 268 yield self[r]
269 269
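A hedged usage sketch of the two revset helpers above; the repository handle repo and the revset string are illustrative, not part of this change:

    # revs() returns plain revision numbers; formatspec quotes %s safely
    headrevs = repo.revs('branch(%s) and head()', 'default')

    # set() yields a changectx per matching revision
    for ctx in repo.set('branch(%s) and head()', 'default'):
        print ctx.rev(), ctx.branch()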
270 270 def url(self):
271 271 return 'file:' + self.root
272 272
273 273 def hook(self, name, throw=False, **args):
274 274 return hook.hook(self.ui, self, name, throw, **args)
275 275
276 276 tag_disallowed = ':\r\n'
277 277
278 278 def _tag(self, names, node, message, local, user, date, extra={}):
279 279 if isinstance(names, str):
280 280 allchars = names
281 281 names = (names,)
282 282 else:
283 283 allchars = ''.join(names)
284 284 for c in self.tag_disallowed:
285 285 if c in allchars:
286 286 raise util.Abort(_('%r cannot be used in a tag name') % c)
287 287
288 288 branches = self.branchmap()
289 289 for name in names:
290 290 self.hook('pretag', throw=True, node=hex(node), tag=name,
291 291 local=local)
292 292 if name in branches:
293 293 self.ui.warn(_("warning: tag %s conflicts with existing"
294 294 " branch name\n") % name)
295 295
296 296 def writetags(fp, names, munge, prevtags):
297 297 fp.seek(0, 2)
298 298 if prevtags and prevtags[-1] != '\n':
299 299 fp.write('\n')
300 300 for name in names:
301 301 m = munge and munge(name) or name
302 302 if (self._tagscache.tagtypes and
303 303 name in self._tagscache.tagtypes):
304 304 old = self.tags().get(name, nullid)
305 305 fp.write('%s %s\n' % (hex(old), m))
306 306 fp.write('%s %s\n' % (hex(node), m))
307 307 fp.close()
308 308
309 309 prevtags = ''
310 310 if local:
311 311 try:
312 312 fp = self.opener('localtags', 'r+')
313 313 except IOError:
314 314 fp = self.opener('localtags', 'a')
315 315 else:
316 316 prevtags = fp.read()
317 317
318 318 # local tags are stored in the current charset
319 319 writetags(fp, names, None, prevtags)
320 320 for name in names:
321 321 self.hook('tag', node=hex(node), tag=name, local=local)
322 322 return
323 323
324 324 try:
325 325 fp = self.wfile('.hgtags', 'rb+')
326 326 except IOError, e:
327 327 if e.errno != errno.ENOENT:
328 328 raise
329 329 fp = self.wfile('.hgtags', 'ab')
330 330 else:
331 331 prevtags = fp.read()
332 332
333 333 # committed tags are stored in UTF-8
334 334 writetags(fp, names, encoding.fromlocal, prevtags)
335 335
336 336 fp.close()
337 337
338 338 self.invalidatecaches()
339 339
340 340 if '.hgtags' not in self.dirstate:
341 341 self[None].add(['.hgtags'])
342 342
343 343 m = matchmod.exact(self.root, '', ['.hgtags'])
344 344 tagnode = self.commit(message, user, date, extra=extra, match=m)
345 345
346 346 for name in names:
347 347 self.hook('tag', node=hex(node), tag=name, local=local)
348 348
349 349 return tagnode
350 350
351 351 def tag(self, names, node, message, local, user, date):
352 352 '''tag a revision with one or more symbolic names.
353 353
354 354 names is a list of strings or, when adding a single tag, names may be a
355 355 string.
356 356
357 357 if local is True, the tags are stored in a per-repository file.
358 358 otherwise, they are stored in the .hgtags file, and a new
359 359 changeset is committed with the change.
360 360
361 361 keyword arguments:
362 362
363 363 local: whether to store tags in non-version-controlled file
364 364 (default False)
365 365
366 366 message: commit message to use if committing
367 367
368 368 user: name of user to use if committing
369 369
370 370 date: date tuple to use if committing'''
371 371
372 372 if not local:
373 373 for x in self.status()[:5]:
374 374 if '.hgtags' in x:
375 375 raise util.Abort(_('working copy of .hgtags is changed '
376 376 '(please commit .hgtags manually)'))
377 377
378 378 self.tags() # instantiate the cache
379 379 self._tag(names, node, message, local, user, date)
380 380
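A hedged sketch of calling tag() per the docstring above; the tag name, message, and use of the tip node are illustrative:

    node = repo['tip'].node()
    # local=True writes .hg/localtags instead of committing .hgtags
    repo.tag('snapshot', node, 'tagging current tip', local=True,
             user=None, date=None)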
381 381 @propertycache
382 382 def _tagscache(self):
383 383 '''Returns a tagscache object that contains various tags related
384 384 caches.'''
385 385
386 386 # This simplifies its cache management by having one decorated
387 387 # function (this one) and the rest simply fetch things from it.
388 388 class tagscache(object):
389 389 def __init__(self):
390 390 # These two define the set of tags for this repository. tags
391 391 # maps tag name to node; tagtypes maps tag name to 'global' or
392 392 # 'local'. (Global tags are defined by .hgtags across all
393 393 # heads, and local tags are defined in .hg/localtags.)
394 394 # They constitute the in-memory cache of tags.
395 395 self.tags = self.tagtypes = None
396 396
397 397 self.nodetagscache = self.tagslist = None
398 398
399 399 cache = tagscache()
400 400 cache.tags, cache.tagtypes = self._findtags()
401 401
402 402 return cache
403 403
404 404 def tags(self):
405 405 '''return a mapping of tag to node'''
406 406 t = {}
407 407 for k, v in self._tagscache.tags.iteritems():
408 408 try:
409 409 # ignore tags to unknown nodes
410 410 self.changelog.rev(v)
411 411 t[k] = v
412 412 except (error.LookupError, ValueError):
413 413 pass
414 414 return t
415 415
416 416 def _findtags(self):
417 417 '''Do the hard work of finding tags. Return a pair of dicts
418 418 (tags, tagtypes) where tags maps tag name to node, and tagtypes
419 419 maps tag name to a string like \'global\' or \'local\'.
420 420 Subclasses or extensions are free to add their own tags, but
421 421 should be aware that the returned dicts will be retained for the
422 422 duration of the localrepo object.'''
423 423
424 424 # XXX what tagtype should subclasses/extensions use? Currently
425 425 # mq and bookmarks add tags, but do not set the tagtype at all.
426 426 # Should each extension invent its own tag type? Should there
427 427 # be one tagtype for all such "virtual" tags? Or is the status
428 428 # quo fine?
429 429
430 430 alltags = {} # map tag name to (node, hist)
431 431 tagtypes = {}
432 432
433 433 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
434 434 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
435 435
436 436 # Build the return dicts. Have to re-encode tag names because
437 437 # the tags module always uses UTF-8 (in order not to lose info
438 438 # writing to the cache), but the rest of Mercurial wants them in
439 439 # local encoding.
440 440 tags = {}
441 441 for (name, (node, hist)) in alltags.iteritems():
442 442 if node != nullid:
443 443 tags[encoding.tolocal(name)] = node
444 444 tags['tip'] = self.changelog.tip()
445 445 tagtypes = dict([(encoding.tolocal(name), value)
446 446 for (name, value) in tagtypes.iteritems()])
447 447 return (tags, tagtypes)
448 448
449 449 def tagtype(self, tagname):
450 450 '''
451 451 return the type of the given tag. result can be:
452 452
453 453 'local' : a local tag
454 454 'global' : a global tag
455 455 None : tag does not exist
456 456 '''
457 457
458 458 return self._tagscache.tagtypes.get(tagname)
459 459
460 460 def tagslist(self):
461 461 '''return a list of tags ordered by revision'''
462 462 if not self._tagscache.tagslist:
463 463 l = []
464 464 for t, n in self.tags().iteritems():
465 465 r = self.changelog.rev(n)
466 466 l.append((r, t, n))
467 467 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
468 468
469 469 return self._tagscache.tagslist
470 470
471 471 def nodetags(self, node):
472 472 '''return the tags associated with a node'''
473 473 if not self._tagscache.nodetagscache:
474 474 nodetagscache = {}
475 475 for t, n in self._tagscache.tags.iteritems():
476 476 nodetagscache.setdefault(n, []).append(t)
477 477 for tags in nodetagscache.itervalues():
478 478 tags.sort()
479 479 self._tagscache.nodetagscache = nodetagscache
480 480 return self._tagscache.nodetagscache.get(node, [])
481 481
482 482 def nodebookmarks(self, node):
483 483 marks = []
484 484 for bookmark, n in self._bookmarks.iteritems():
485 485 if n == node:
486 486 marks.append(bookmark)
487 487 return sorted(marks)
488 488
489 489 def _branchtags(self, partial, lrev):
490 490 # TODO: rename this function?
491 491 tiprev = len(self) - 1
492 492 if lrev != tiprev:
493 493 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
494 494 self._updatebranchcache(partial, ctxgen)
495 495 self._writebranchcache(partial, self.changelog.tip(), tiprev)
496 496
497 497 return partial
498 498
499 499 def updatebranchcache(self):
500 500 tip = self.changelog.tip()
501 501 if self._branchcache is not None and self._branchcachetip == tip:
502 502 return
503 503
504 504 oldtip = self._branchcachetip
505 505 self._branchcachetip = tip
506 506 if oldtip is None or oldtip not in self.changelog.nodemap:
507 507 partial, last, lrev = self._readbranchcache()
508 508 else:
509 509 lrev = self.changelog.rev(oldtip)
510 510 partial = self._branchcache
511 511
512 512 self._branchtags(partial, lrev)
513 513 # this private cache holds all heads (not just the branch tips)
514 514 self._branchcache = partial
515 515
516 516 def branchmap(self):
517 517 '''returns a dictionary {branch: [branchheads]}'''
518 518 self.updatebranchcache()
519 519 return self._branchcache
520 520
521 521 def _branchtip(self, heads):
522 522 '''return the tipmost branch head in heads'''
523 523 tip = heads[-1]
524 524 for h in reversed(heads):
525 525 if not self[h].closesbranch():
526 526 tip = h
527 527 break
528 528 return tip
529 529
530 530 def branchtip(self, branch):
531 531 '''return the tip node for a given branch'''
532 532 if branch not in self.branchmap():
533 533 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
534 534 return self._branchtip(self.branchmap()[branch])
535 535
536 536 def branchtags(self):
537 537 '''return a dict where branch names map to the tipmost head of
538 538 the branch, open heads come before closed'''
539 539 bt = {}
540 540 for bn, heads in self.branchmap().iteritems():
541 541 bt[bn] = self._branchtip(heads)
542 542 return bt
543 543
544 544 def _readbranchcache(self):
545 545 partial = {}
546 546 try:
547 547 f = self.opener("cache/branchheads")
548 548 lines = f.read().split('\n')
549 549 f.close()
550 550 except (IOError, OSError):
551 551 return {}, nullid, nullrev
552 552
553 553 try:
554 554 last, lrev = lines.pop(0).split(" ", 1)
555 555 last, lrev = bin(last), int(lrev)
556 556 if lrev >= len(self) or self[lrev].node() != last:
557 557 # invalidate the cache
558 558 raise ValueError('invalidating branch cache (tip differs)')
559 559 for l in lines:
560 560 if not l:
561 561 continue
562 562 node, label = l.split(" ", 1)
563 563 label = encoding.tolocal(label.strip())
564 564 if not node in self:
565 565 raise ValueError('invalidating branch cache because node '+
566 566 '%s does not exist' % node)
567 567 partial.setdefault(label, []).append(bin(node))
568 568 except KeyboardInterrupt:
569 569 raise
570 570 except Exception, inst:
571 571 if self.ui.debugflag:
572 572 self.ui.warn(str(inst), '\n')
573 573 partial, last, lrev = {}, nullid, nullrev
574 574 return partial, last, lrev
575 575
576 576 def _writebranchcache(self, branches, tip, tiprev):
577 577 try:
578 578 f = self.opener("cache/branchheads", "w", atomictemp=True)
579 579 f.write("%s %s\n" % (hex(tip), tiprev))
580 580 for label, nodes in branches.iteritems():
581 581 for node in nodes:
582 582 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
583 583 f.close()
584 584 except (IOError, OSError):
585 585 pass
586 586
587 587 def _updatebranchcache(self, partial, ctxgen):
588 588 """Given a branchhead cache, partial, that may have extra nodes or be
589 589 missing heads, and a generator of nodes that are at least a superset of
590 590 the missing heads, this function updates partial to be correct.
591 591 """
592 592 # collect new branch entries
593 593 newbranches = {}
594 594 for c in ctxgen:
595 595 newbranches.setdefault(c.branch(), []).append(c.node())
596 596 # if older branchheads are reachable from new ones, they aren't
597 597 # really branchheads. Note checking parents is insufficient:
598 598 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
599 599 for branch, newnodes in newbranches.iteritems():
600 600 bheads = partial.setdefault(branch, [])
601 601 # Remove candidate heads that no longer are in the repo (e.g., as
602 602 # the result of a strip that just happened). Avoid using 'node in
603 603 # self' here because that dives down into branchcache code somewhat
604 604 # recursively.
605 605 bheadrevs = [self.changelog.rev(node) for node in bheads
606 606 if self.changelog.hasnode(node)]
607 607 newheadrevs = [self.changelog.rev(node) for node in newnodes
608 608 if self.changelog.hasnode(node)]
609 609 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
610 610 # Remove duplicates - nodes that are in newheadrevs and are already
611 611 # in bheadrevs. This can happen if you strip a node whose parent
612 612 # was already a head (because they're on different branches).
613 613 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
614 614
615 615 # Starting from tip means fewer passes over reachable. If we know
616 616 # the new candidates are not ancestors of existing heads, we don't
617 617 # have to examine ancestors of existing heads
618 618 if ctxisnew:
619 619 iterrevs = sorted(newheadrevs)
620 620 else:
621 621 iterrevs = list(bheadrevs)
622 622
623 623 # This loop prunes out two kinds of heads - heads that are
624 624 # superseded by a head in newheadrevs, and newheadrevs that are not
625 625 # heads because an existing head is their descendant.
626 626 while iterrevs:
627 627 latest = iterrevs.pop()
628 628 if latest not in bheadrevs:
629 629 continue
630 630 ancestors = set(self.changelog.ancestors([latest],
631 631 bheadrevs[0]))
632 632 if ancestors:
633 633 bheadrevs = [b for b in bheadrevs if b not in ancestors]
634 634 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
635 635
636 636 # There may be branches that cease to exist when the last commit in the
637 637 # branch was stripped. This code filters them out. Note that the
638 638 # branch that ceased to exist may not be in newbranches because
639 639 # newbranches is the set of candidate heads, which when you strip the
640 640 # last commit in a branch will be the parent branch.
641 641 for branch in partial:
642 642 nodes = [head for head in partial[branch]
643 643 if self.changelog.hasnode(head)]
644 644 if not nodes:
645 645 del partial[branch]
646 646
647 647 def lookup(self, key):
648 648 return self[key].node()
649 649
650 650 def lookupbranch(self, key, remote=None):
651 651 repo = remote or self
652 652 if key in repo.branchmap():
653 653 return key
654 654
655 655 repo = (remote and remote.local()) and remote or self
656 656 return repo[key].branch()
657 657
658 658 def known(self, nodes):
659 659 nm = self.changelog.nodemap
660 660 pc = self._phasecache
661 661 result = []
662 662 for n in nodes:
663 663 r = nm.get(n)
664 664 resp = not (r is None or pc.phase(self, r) >= phases.secret)
665 665 result.append(resp)
666 666 return result
667 667
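A hedged sketch of known(): it maps each node to a boolean and, as the code above shows, treats secret changesets as unknown. The made-up second node is illustrative:

    candidates = [repo['tip'].node(), '\x01' * 20]
    print repo.known(candidates)   # typically [True, False]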
668 668 def local(self):
669 669 return self
670 670
671 671 def join(self, f):
672 672 return os.path.join(self.path, f)
673 673
674 674 def wjoin(self, f):
675 675 return os.path.join(self.root, f)
676 676
677 677 def file(self, f):
678 678 if f[0] == '/':
679 679 f = f[1:]
680 680 return filelog.filelog(self.sopener, f)
681 681
682 682 def changectx(self, changeid):
683 683 return self[changeid]
684 684
685 685 def parents(self, changeid=None):
686 686 '''get list of changectxs for parents of changeid'''
687 687 return self[changeid].parents()
688 688
689 689 def setparents(self, p1, p2=nullid):
690 690 copies = self.dirstate.setparents(p1, p2)
691 691 if copies:
692 692 # Adjust copy records; the dirstate cannot do it, as it
693 693 # requires access to the parents' manifests. Preserve them
694 694 # only for entries added to first parent.
695 695 pctx = self[p1]
696 696 for f in copies:
697 697 if f not in pctx and copies[f] in pctx:
698 698 self.dirstate.copy(copies[f], f)
699 699
700 700 def filectx(self, path, changeid=None, fileid=None):
701 701 """changeid can be a changeset revision, node, or tag.
702 702 fileid can be a file revision or node."""
703 703 return context.filectx(self, path, changeid, fileid)
704 704
705 705 def getcwd(self):
706 706 return self.dirstate.getcwd()
707 707
708 708 def pathto(self, f, cwd=None):
709 709 return self.dirstate.pathto(f, cwd)
710 710
711 711 def wfile(self, f, mode='r'):
712 712 return self.wopener(f, mode)
713 713
714 714 def _link(self, f):
715 715 return os.path.islink(self.wjoin(f))
716 716
717 717 def _loadfilter(self, filter):
718 718 if filter not in self.filterpats:
719 719 l = []
720 720 for pat, cmd in self.ui.configitems(filter):
721 721 if cmd == '!':
722 722 continue
723 723 mf = matchmod.match(self.root, '', [pat])
724 724 fn = None
725 725 params = cmd
726 726 for name, filterfn in self._datafilters.iteritems():
727 727 if cmd.startswith(name):
728 728 fn = filterfn
729 729 params = cmd[len(name):].lstrip()
730 730 break
731 731 if not fn:
732 732 fn = lambda s, c, **kwargs: util.filter(s, c)
733 733 # Wrap old filters not supporting keyword arguments
734 734 if not inspect.getargspec(fn)[2]:
735 735 oldfn = fn
736 736 fn = lambda s, c, **kwargs: oldfn(s, c)
737 737 l.append((mf, fn, params))
738 738 self.filterpats[filter] = l
739 739 return self.filterpats[filter]
740 740
741 741 def _filter(self, filterpats, filename, data):
742 742 for mf, fn, cmd in filterpats:
743 743 if mf(filename):
744 744 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
745 745 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
746 746 break
747 747
748 748 return data
749 749
750 750 @propertycache
751 751 def _encodefilterpats(self):
752 752 return self._loadfilter('encode')
753 753
754 754 @propertycache
755 755 def _decodefilterpats(self):
756 756 return self._loadfilter('decode')
757 757
758 758 def adddatafilter(self, name, filter):
759 759 self._datafilters[name] = filter
760 760
761 761 def wread(self, filename):
762 762 if self._link(filename):
763 763 data = os.readlink(self.wjoin(filename))
764 764 else:
765 765 data = self.wopener.read(filename)
766 766 return self._filter(self._encodefilterpats, filename, data)
767 767
768 768 def wwrite(self, filename, data, flags):
769 769 data = self._filter(self._decodefilterpats, filename, data)
770 770 if 'l' in flags:
771 771 self.wopener.symlink(data, filename)
772 772 else:
773 773 self.wopener.write(filename, data)
774 774 if 'x' in flags:
775 775 util.setflags(self.wjoin(filename), False, True)
776 776
777 777 def wwritedata(self, filename, data):
778 778 return self._filter(self._decodefilterpats, filename, data)
779 779
780 780 def transaction(self, desc):
781 781 tr = self._transref and self._transref() or None
782 782 if tr and tr.running():
783 783 return tr.nest()
784 784
785 785 # abort here if the journal already exists
786 786 if os.path.exists(self.sjoin("journal")):
787 787 raise error.RepoError(
788 788 _("abandoned transaction found - run hg recover"))
789 789
790 790 self._writejournal(desc)
791 791 renames = [(x, undoname(x)) for x in self._journalfiles()]
792 792
793 793 tr = transaction.transaction(self.ui.warn, self.sopener,
794 794 self.sjoin("journal"),
795 795 aftertrans(renames),
796 796 self.store.createmode)
797 797 self._transref = weakref.ref(tr)
798 798 return tr
799 799
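A hedged sketch of the caller pattern this method expects (the same close/release dance used by commitctx() later in this file; the transaction name is illustrative):

    tr = repo.transaction('my-operation')
    try:
        # ... write store data via repo.sopener under the transaction ...
        tr.close()      # commit the journaled writes
    finally:
        tr.release()    # if close() was never reached, roll everything back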
800 800 def _journalfiles(self):
801 801 return (self.sjoin('journal'), self.join('journal.dirstate'),
802 802 self.join('journal.branch'), self.join('journal.desc'),
803 803 self.join('journal.bookmarks'),
804 804 self.sjoin('journal.phaseroots'))
805 805
806 806 def undofiles(self):
807 807 return [undoname(x) for x in self._journalfiles()]
808 808
809 809 def _writejournal(self, desc):
810 810 self.opener.write("journal.dirstate",
811 811 self.opener.tryread("dirstate"))
812 812 self.opener.write("journal.branch",
813 813 encoding.fromlocal(self.dirstate.branch()))
814 814 self.opener.write("journal.desc",
815 815 "%d\n%s\n" % (len(self), desc))
816 816 self.opener.write("journal.bookmarks",
817 817 self.opener.tryread("bookmarks"))
818 818 self.sopener.write("journal.phaseroots",
819 819 self.sopener.tryread("phaseroots"))
820 820
821 821 def recover(self):
822 822 lock = self.lock()
823 823 try:
824 824 if os.path.exists(self.sjoin("journal")):
825 825 self.ui.status(_("rolling back interrupted transaction\n"))
826 826 transaction.rollback(self.sopener, self.sjoin("journal"),
827 827 self.ui.warn)
828 828 self.invalidate()
829 829 return True
830 830 else:
831 831 self.ui.warn(_("no interrupted transaction available\n"))
832 832 return False
833 833 finally:
834 834 lock.release()
835 835
836 836 def rollback(self, dryrun=False, force=False):
837 837 wlock = lock = None
838 838 try:
839 839 wlock = self.wlock()
840 840 lock = self.lock()
841 841 if os.path.exists(self.sjoin("undo")):
842 842 return self._rollback(dryrun, force)
843 843 else:
844 844 self.ui.warn(_("no rollback information available\n"))
845 845 return 1
846 846 finally:
847 847 release(lock, wlock)
848 848
849 849 def _rollback(self, dryrun, force):
850 850 ui = self.ui
851 851 try:
852 852 args = self.opener.read('undo.desc').splitlines()
853 853 (oldlen, desc, detail) = (int(args[0]), args[1], None)
854 854 if len(args) >= 3:
855 855 detail = args[2]
856 856 oldtip = oldlen - 1
857 857
858 858 if detail and ui.verbose:
859 859 msg = (_('repository tip rolled back to revision %s'
860 860 ' (undo %s: %s)\n')
861 861 % (oldtip, desc, detail))
862 862 else:
863 863 msg = (_('repository tip rolled back to revision %s'
864 864 ' (undo %s)\n')
865 865 % (oldtip, desc))
866 866 except IOError:
867 867 msg = _('rolling back unknown transaction\n')
868 868 desc = None
869 869
870 870 if not force and self['.'] != self['tip'] and desc == 'commit':
871 871 raise util.Abort(
872 872 _('rollback of last commit while not checked out '
873 873 'may lose data'), hint=_('use -f to force'))
874 874
875 875 ui.status(msg)
876 876 if dryrun:
877 877 return 0
878 878
879 879 parents = self.dirstate.parents()
880 880 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
881 881 if os.path.exists(self.join('undo.bookmarks')):
882 882 util.rename(self.join('undo.bookmarks'),
883 883 self.join('bookmarks'))
884 884 if os.path.exists(self.sjoin('undo.phaseroots')):
885 885 util.rename(self.sjoin('undo.phaseroots'),
886 886 self.sjoin('phaseroots'))
887 887 self.invalidate()
888 888
889 889 parentgone = (parents[0] not in self.changelog.nodemap or
890 890 parents[1] not in self.changelog.nodemap)
891 891 if parentgone:
892 892 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
893 893 try:
894 894 branch = self.opener.read('undo.branch')
895 895 self.dirstate.setbranch(branch)
896 896 except IOError:
897 897 ui.warn(_('named branch could not be reset: '
898 898 'current branch is still \'%s\'\n')
899 899 % self.dirstate.branch())
900 900
901 901 self.dirstate.invalidate()
902 902 parents = tuple([p.rev() for p in self.parents()])
903 903 if len(parents) > 1:
904 904 ui.status(_('working directory now based on '
905 905 'revisions %d and %d\n') % parents)
906 906 else:
907 907 ui.status(_('working directory now based on '
908 908 'revision %d\n') % parents)
909 909 # TODO: if we know which new heads may result from this rollback, pass
910 910 # them to destroy(), which will prevent the branchhead cache from being
911 911 # invalidated.
912 912 self.destroyed()
913 913 return 0
914 914
915 915 def invalidatecaches(self):
916 916 def delcache(name):
917 917 try:
918 918 delattr(self, name)
919 919 except AttributeError:
920 920 pass
921 921
922 922 delcache('_tagscache')
923 923
924 924 self._branchcache = None # in UTF-8
925 925 self._branchcachetip = None
926 926
927 927 def invalidatedirstate(self):
928 928 '''Invalidates the dirstate, causing the next call to dirstate
929 929 to check if it was modified since the last time it was read,
930 930 rereading it if it has.
931 931
932 932 This is different from dirstate.invalidate() in that it doesn't
933 933 always reread the dirstate. Use dirstate.invalidate() if you want to
934 934 explicitly read the dirstate again (i.e. restoring it to a previous
935 935 known good state).'''
936 936 if 'dirstate' in self.__dict__:
937 937 for k in self.dirstate._filecache:
938 938 try:
939 939 delattr(self.dirstate, k)
940 940 except AttributeError:
941 941 pass
942 942 delattr(self, 'dirstate')
943 943
944 944 def invalidate(self):
945 945 for k in self._filecache:
946 946 # dirstate is invalidated separately in invalidatedirstate()
947 947 if k == 'dirstate':
948 948 continue
949 949
950 950 try:
951 951 delattr(self, k)
952 952 except AttributeError:
953 953 pass
954 954 self.invalidatecaches()
955 955
956 956 # Discard all cache entries to force reloading everything.
957 957 self._filecache.clear()
958 958
959 959 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
960 960 try:
961 961 l = lock.lock(lockname, 0, releasefn, desc=desc)
962 962 except error.LockHeld, inst:
963 963 if not wait:
964 964 raise
965 965 self.ui.warn(_("waiting for lock on %s held by %r\n") %
966 966 (desc, inst.locker))
967 967 # default to 600 seconds timeout
968 968 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
969 969 releasefn, desc=desc)
970 970 if acquirefn:
971 971 acquirefn()
972 972 return l
973 973
974 974 def _afterlock(self, callback):
975 975 """add a callback to the current repository lock.
976 976
977 977 The callback will be executed on lock release."""
978 978 l = self._lockref and self._lockref()
979 979 if l:
980 980 l.postrelease.append(callback)
981 981 else:
982 982 callback()
983 983
984 984 def lock(self, wait=True):
985 985 '''Lock the repository store (.hg/store) and return a weak reference
986 986 to the lock. Use this before modifying the store (e.g. committing or
987 987 stripping). If you are opening a transaction, get a lock as well.'''
988 988 l = self._lockref and self._lockref()
989 989 if l is not None and l.held:
990 990 l.lock()
991 991 return l
992 992
993 993 def unlock():
994 994 self.store.write()
995 995 if '_phasecache' in vars(self):
996 996 self._phasecache.write()
997 997 for k, ce in self._filecache.items():
998 998 if k == 'dirstate':
999 999 continue
1000 1000 ce.refresh()
1001 1001
1002 1002 l = self._lock(self.sjoin("lock"), wait, unlock,
1003 1003 self.invalidate, _('repository %s') % self.origroot)
1004 1004 self._lockref = weakref.ref(l)
1005 1005 return l
1006 1006
1007 1007 def wlock(self, wait=True):
1008 1008 '''Lock the non-store parts of the repository (everything under
1009 1009 .hg except .hg/store) and return a weak reference to the lock.
1010 1010 Use this before modifying files in .hg.'''
1011 1011 l = self._wlockref and self._wlockref()
1012 1012 if l is not None and l.held:
1013 1013 l.lock()
1014 1014 return l
1015 1015
1016 1016 def unlock():
1017 1017 self.dirstate.write()
1018 1018 ce = self._filecache.get('dirstate')
1019 1019 if ce:
1020 1020 ce.refresh()
1021 1021
1022 1022 l = self._lock(self.join("wlock"), wait, unlock,
1023 1023 self.invalidatedirstate, _('working directory of %s') %
1024 1024 self.origroot)
1025 1025 self._wlockref = weakref.ref(l)
1026 1026 return l
1027 1027
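A hedged sketch of the conventional acquisition order, mirroring rollback() above: take wlock before lock and release in reverse (release() from the lock module tolerates None):

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # ... modify working-directory state and the store ...
    finally:
        release(lock, wlock)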
1028 1028 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1029 1029 """
1030 1030 commit an individual file as part of a larger transaction
1031 1031 """
1032 1032
1033 1033 fname = fctx.path()
1034 1034 text = fctx.data()
1035 1035 flog = self.file(fname)
1036 1036 fparent1 = manifest1.get(fname, nullid)
1037 1037 fparent2 = fparent2o = manifest2.get(fname, nullid)
1038 1038
1039 1039 meta = {}
1040 1040 copy = fctx.renamed()
1041 1041 if copy and copy[0] != fname:
1042 1042 # Mark the new revision of this file as a copy of another
1043 1043 # file. This copy data will effectively act as a parent
1044 1044 # of this new revision. If this is a merge, the first
1045 1045 # parent will be the nullid (meaning "look up the copy data")
1046 1046 # and the second one will be the other parent. For example:
1047 1047 #
1048 1048 # 0 --- 1 --- 3 rev1 changes file foo
1049 1049 # \ / rev2 renames foo to bar and changes it
1050 1050 # \- 2 -/ rev3 should have bar with all changes and
1051 1051 # should record that bar descends from
1052 1052 # bar in rev2 and foo in rev1
1053 1053 #
1054 1054 # this allows this merge to succeed:
1055 1055 #
1056 1056 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1057 1057 # \ / merging rev3 and rev4 should use bar@rev2
1058 1058 # \- 2 --- 4 as the merge base
1059 1059 #
1060 1060
1061 1061 cfname = copy[0]
1062 1062 crev = manifest1.get(cfname)
1063 1063 newfparent = fparent2
1064 1064
1065 1065 if manifest2: # branch merge
1066 1066 if fparent2 == nullid or crev is None: # copied on remote side
1067 1067 if cfname in manifest2:
1068 1068 crev = manifest2[cfname]
1069 1069 newfparent = fparent1
1070 1070
1071 1071 # find source in nearest ancestor if we've lost track
1072 1072 if not crev:
1073 1073 self.ui.debug(" %s: searching for copy revision for %s\n" %
1074 1074 (fname, cfname))
1075 1075 for ancestor in self[None].ancestors():
1076 1076 if cfname in ancestor:
1077 1077 crev = ancestor[cfname].filenode()
1078 1078 break
1079 1079
1080 1080 if crev:
1081 1081 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1082 1082 meta["copy"] = cfname
1083 1083 meta["copyrev"] = hex(crev)
1084 1084 fparent1, fparent2 = nullid, newfparent
1085 1085 else:
1086 1086 self.ui.warn(_("warning: can't find ancestor for '%s' "
1087 1087 "copied from '%s'!\n") % (fname, cfname))
1088 1088
1089 1089 elif fparent2 != nullid:
1090 1090 # is one parent an ancestor of the other?
1091 1091 fparentancestor = flog.ancestor(fparent1, fparent2)
1092 1092 if fparentancestor == fparent1:
1093 1093 fparent1, fparent2 = fparent2, nullid
1094 1094 elif fparentancestor == fparent2:
1095 1095 fparent2 = nullid
1096 1096
1097 1097 # is the file changed?
1098 1098 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1099 1099 changelist.append(fname)
1100 1100 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1101 1101
1102 1102 # are just the flags changed during merge?
1103 1103 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1104 1104 changelist.append(fname)
1105 1105
1106 1106 return fparent1
1107 1107
1108 1108 def commit(self, text="", user=None, date=None, match=None, force=False,
1109 1109 editor=False, extra={}):
1110 1110 """Add a new revision to current repository.
1111 1111
1112 1112 Revision information is gathered from the working directory,
1113 1113 match can be used to filter the committed files. If editor is
1114 1114 supplied, it is called to get a commit message.
1115 1115 """
1116 1116
1117 1117 def fail(f, msg):
1118 1118 raise util.Abort('%s: %s' % (f, msg))
1119 1119
1120 1120 if not match:
1121 1121 match = matchmod.always(self.root, '')
1122 1122
1123 1123 if not force:
1124 1124 vdirs = []
1125 1125 match.dir = vdirs.append
1126 1126 match.bad = fail
1127 1127
1128 1128 wlock = self.wlock()
1129 1129 try:
1130 1130 wctx = self[None]
1131 1131 merge = len(wctx.parents()) > 1
1132 1132
1133 1133 if (not force and merge and match and
1134 1134 (match.files() or match.anypats())):
1135 1135 raise util.Abort(_('cannot partially commit a merge '
1136 1136 '(do not specify files or patterns)'))
1137 1137
1138 1138 changes = self.status(match=match, clean=force)
1139 1139 if force:
1140 1140 changes[0].extend(changes[6]) # mq may commit unchanged files
1141 1141
1142 1142 # check subrepos
1143 1143 subs = []
1144 1144 commitsubs = set()
1145 1145 newstate = wctx.substate.copy()
1146 1146 # only manage subrepos and .hgsubstate if .hgsub is present
1147 1147 if '.hgsub' in wctx:
1148 1148 # we'll decide whether to track this ourselves, thanks
1149 1149 if '.hgsubstate' in changes[0]:
1150 1150 changes[0].remove('.hgsubstate')
1151 1151 if '.hgsubstate' in changes[2]:
1152 1152 changes[2].remove('.hgsubstate')
1153 1153
1154 1154 # compare current state to last committed state
1155 1155 # build new substate based on last committed state
1156 1156 oldstate = wctx.p1().substate
1157 1157 for s in sorted(newstate.keys()):
1158 1158 if not match(s):
1159 1159 # ignore working copy, use old state if present
1160 1160 if s in oldstate:
1161 1161 newstate[s] = oldstate[s]
1162 1162 continue
1163 1163 if not force:
1164 1164 raise util.Abort(
1165 1165 _("commit with new subrepo %s excluded") % s)
1166 1166 if wctx.sub(s).dirty(True):
1167 1167 if not self.ui.configbool('ui', 'commitsubrepos'):
1168 1168 raise util.Abort(
1169 1169 _("uncommitted changes in subrepo %s") % s,
1170 1170 hint=_("use --subrepos for recursive commit"))
1171 1171 subs.append(s)
1172 1172 commitsubs.add(s)
1173 1173 else:
1174 1174 bs = wctx.sub(s).basestate()
1175 1175 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1176 1176 if oldstate.get(s, (None, None, None))[1] != bs:
1177 1177 subs.append(s)
1178 1178
1179 1179 # check for removed subrepos
1180 1180 for p in wctx.parents():
1181 1181 r = [s for s in p.substate if s not in newstate]
1182 1182 subs += [s for s in r if match(s)]
1183 1183 if subs:
1184 1184 if (not match('.hgsub') and
1185 1185 '.hgsub' in (wctx.modified() + wctx.added())):
1186 1186 raise util.Abort(
1187 1187 _("can't commit subrepos without .hgsub"))
1188 1188 changes[0].insert(0, '.hgsubstate')
1189 1189
1190 1190 elif '.hgsub' in changes[2]:
1191 1191 # clean up .hgsubstate when .hgsub is removed
1192 1192 if ('.hgsubstate' in wctx and
1193 1193 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1194 1194 changes[2].insert(0, '.hgsubstate')
1195 1195
1196 1196 # make sure all explicit patterns are matched
1197 1197 if not force and match.files():
1198 1198 matched = set(changes[0] + changes[1] + changes[2])
1199 1199
1200 1200 for f in match.files():
1201 1201 if f == '.' or f in matched or f in wctx.substate:
1202 1202 continue
1203 1203 if f in changes[3]: # missing
1204 1204 fail(f, _('file not found!'))
1205 1205 if f in vdirs: # visited directory
1206 1206 d = f + '/'
1207 1207 for mf in matched:
1208 1208 if mf.startswith(d):
1209 1209 break
1210 1210 else:
1211 1211 fail(f, _("no match under directory!"))
1212 1212 elif f not in self.dirstate:
1213 1213 fail(f, _("file not tracked!"))
1214 1214
1215 1215 if (not force and not extra.get("close") and not merge
1216 1216 and not (changes[0] or changes[1] or changes[2])
1217 1217 and wctx.branch() == wctx.p1().branch()):
1218 1218 return None
1219 1219
1220 1220 if merge and changes[3]:
1221 1221 raise util.Abort(_("cannot commit merge with missing files"))
1222 1222
1223 1223 ms = mergemod.mergestate(self)
1224 1224 for f in changes[0]:
1225 1225 if f in ms and ms[f] == 'u':
1226 1226 raise util.Abort(_("unresolved merge conflicts "
1227 1227 "(see hg help resolve)"))
1228 1228
1229 1229 cctx = context.workingctx(self, text, user, date, extra, changes)
1230 1230 if editor:
1231 1231 cctx._text = editor(self, cctx, subs)
1232 1232 edited = (text != cctx._text)
1233 1233
1234 1234 # commit subs and write new state
1235 1235 if subs:
1236 1236 for s in sorted(commitsubs):
1237 1237 sub = wctx.sub(s)
1238 1238 self.ui.status(_('committing subrepository %s\n') %
1239 1239 subrepo.subrelpath(sub))
1240 1240 sr = sub.commit(cctx._text, user, date)
1241 1241 newstate[s] = (newstate[s][0], sr)
1242 1242 subrepo.writestate(self, newstate)
1243 1243
1244 1244 # Save commit message in case this transaction gets rolled back
1245 1245 # (e.g. by a pretxncommit hook). Leave the content alone on
1246 1246 # the assumption that the user will use the same editor again.
1247 1247 msgfn = self.savecommitmessage(cctx._text)
1248 1248
1249 1249 p1, p2 = self.dirstate.parents()
1250 1250 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1251 1251 try:
1252 1252 self.hook("precommit", throw=True, parent1=hookp1,
1253 1253 parent2=hookp2)
1254 1254 ret = self.commitctx(cctx, True)
1255 1255 except: # re-raises
1256 1256 if edited:
1257 1257 self.ui.write(
1258 1258 _('note: commit message saved in %s\n') % msgfn)
1259 1259 raise
1260 1260
1261 1261 # update bookmarks, dirstate and mergestate
1262 1262 bookmarks.update(self, [p1, p2], ret)
1263 1263 for f in changes[0] + changes[1]:
1264 1264 self.dirstate.normal(f)
1265 1265 for f in changes[2]:
1266 1266 self.dirstate.drop(f)
1267 1267 self.dirstate.setparents(ret)
1268 1268 ms.reset()
1269 1269 finally:
1270 1270 wlock.release()
1271 1271
1272 1272 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1273 1273 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1274 1274 self._afterlock(commithook)
1275 1275 return ret
1276 1276
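A hedged sketch of a caller of commit(); the file name, user, and message are illustrative. matchmod.exact() restricts the commit to the named files, as the .hgtags commit in _tag() above does:

    m = matchmod.exact(repo.root, '', ['README'])
    node = repo.commit('update README', user='alice', match=m)
    if node is None:
        print 'nothing changed'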
1277 1277 def commitctx(self, ctx, error=False):
1278 1278 """Add a new revision to current repository.
1279 1279 Revision information is passed via the context argument.
1280 1280 """
1281 1281
1282 1282 tr = lock = None
1283 1283 removed = list(ctx.removed())
1284 1284 p1, p2 = ctx.p1(), ctx.p2()
1285 1285 user = ctx.user()
1286 1286
1287 1287 lock = self.lock()
1288 1288 try:
1289 1289 tr = self.transaction("commit")
1290 1290 trp = weakref.proxy(tr)
1291 1291
1292 1292 if ctx.files():
1293 1293 m1 = p1.manifest().copy()
1294 1294 m2 = p2.manifest()
1295 1295
1296 1296 # check in files
1297 1297 new = {}
1298 1298 changed = []
1299 1299 linkrev = len(self)
1300 1300 for f in sorted(ctx.modified() + ctx.added()):
1301 1301 self.ui.note(f + "\n")
1302 1302 try:
1303 1303 fctx = ctx[f]
1304 1304 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1305 1305 changed)
1306 1306 m1.set(f, fctx.flags())
1307 1307 except OSError, inst:
1308 1308 self.ui.warn(_("trouble committing %s!\n") % f)
1309 1309 raise
1310 1310 except IOError, inst:
1311 1311 errcode = getattr(inst, 'errno', errno.ENOENT)
1312 1312 if error or errcode and errcode != errno.ENOENT:
1313 1313 self.ui.warn(_("trouble committing %s!\n") % f)
1314 1314 raise
1315 1315 else:
1316 1316 removed.append(f)
1317 1317
1318 1318 # update manifest
1319 1319 m1.update(new)
1320 1320 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1321 1321 drop = [f for f in removed if f in m1]
1322 1322 for f in drop:
1323 1323 del m1[f]
1324 1324 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1325 1325 p2.manifestnode(), (new, drop))
1326 1326 files = changed + removed
1327 1327 else:
1328 1328 mn = p1.manifestnode()
1329 1329 files = []
1330 1330
1331 1331 # update changelog
1332 1332 self.changelog.delayupdate()
1333 1333 n = self.changelog.add(mn, files, ctx.description(),
1334 1334 trp, p1.node(), p2.node(),
1335 1335 user, ctx.date(), ctx.extra().copy())
1336 1336 p = lambda: self.changelog.writepending() and self.root or ""
1337 1337 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1338 1338 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1339 1339 parent2=xp2, pending=p)
1340 1340 self.changelog.finalize(trp)
1341 1341 # set the new commit in its proper phase
1342 1342 targetphase = phases.newcommitphase(self.ui)
1343 1343 if targetphase:
1344 1344 # retracting the boundary does not alter parent changesets.
1345 1345 # if a parent has a higher phase, the resulting phase will
1346 1346 # be compliant anyway
1347 1347 #
1348 1348 # if minimal phase was 0 we don't need to retract anything
1349 1349 phases.retractboundary(self, targetphase, [n])
1350 1350 tr.close()
1351 1351 self.updatebranchcache()
1352 1352 return n
1353 1353 finally:
1354 1354 if tr:
1355 1355 tr.release()
1356 1356 lock.release()
1357 1357
1358 1358 def destroyed(self, newheadnodes=None):
1359 1359 '''Inform the repository that nodes have been destroyed.
1360 1360 Intended for use by strip and rollback, so there's a common
1361 1361 place for anything that has to be done after destroying history.
1362 1362
1363 1363 If you know the branchheads cache was up to date before nodes were removed
1364 1364 and you also know the set of candidate new heads that may have resulted
1365 1365 from the destruction, you can set newheadnodes. This will enable the
1366 1366 code to update the branchheads cache, rather than having future code
1367 1367 decide it's invalid and regenerate it from scratch.
1368 1368 '''
1369 1369 # If we have info, newheadnodes, on how to update the branch cache, do
1370 1370 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1371 1371 # will be caught the next time it is read.
1372 1372 if newheadnodes:
1373 1373 tiprev = len(self) - 1
1374 1374 ctxgen = (self[node] for node in newheadnodes
1375 1375 if self.changelog.hasnode(node))
1376 1376 self._updatebranchcache(self._branchcache, ctxgen)
1377 1377 self._writebranchcache(self._branchcache, self.changelog.tip(),
1378 1378 tiprev)
1379 1379
1380 1380 # Ensure the persistent tag cache is updated. Doing it now
1381 1381 # means that the tag cache only has to worry about destroyed
1382 1382 # heads immediately after a strip/rollback. That in turn
1383 1383 # guarantees that "cachetip == currenttip" (comparing both rev
1384 1384 # and node) always means no nodes have been added or destroyed.
1385 1385
1386 1386 # XXX this is suboptimal when qrefresh'ing: we strip the current
1387 1387 # head, refresh the tag cache, then immediately add a new head.
1388 1388 # But I think doing it this way is necessary for the "instant
1389 1389 # tag cache retrieval" case to work.
1390 1390 self.invalidatecaches()
1391 1391
1392 1392 def walk(self, match, node=None):
1393 1393 '''
1394 1394 walk recursively through the directory tree or a given
1395 1395 changeset, finding all files matched by the match
1396 1396 function
1397 1397 '''
1398 1398 return self[node].walk(match)
1399 1399
1400 1400 def status(self, node1='.', node2=None, match=None,
1401 1401 ignored=False, clean=False, unknown=False,
1402 1402 listsubrepos=False):
1403 1403 """return status of files between two nodes or node and working
1404 1404 directory.
1405 1405
1406 1406 If node1 is None, use the first dirstate parent instead.
1407 1407 If node2 is None, compare node1 with working directory.
1408 1408 """
1409 1409
1410 1410 def mfmatches(ctx):
1411 1411 mf = ctx.manifest().copy()
1412 1412 if match.always():
1413 1413 return mf
1414 1414 for fn in mf.keys():
1415 1415 if not match(fn):
1416 1416 del mf[fn]
1417 1417 return mf
1418 1418
1419 1419 if isinstance(node1, context.changectx):
1420 1420 ctx1 = node1
1421 1421 else:
1422 1422 ctx1 = self[node1]
1423 1423 if isinstance(node2, context.changectx):
1424 1424 ctx2 = node2
1425 1425 else:
1426 1426 ctx2 = self[node2]
1427 1427
1428 1428 working = ctx2.rev() is None
1429 1429 parentworking = working and ctx1 == self['.']
1430 1430 match = match or matchmod.always(self.root, self.getcwd())
1431 1431 listignored, listclean, listunknown = ignored, clean, unknown
1432 1432
1433 1433 # load earliest manifest first for caching reasons
1434 1434 if not working and ctx2.rev() < ctx1.rev():
1435 1435 ctx2.manifest()
1436 1436
1437 1437 if not parentworking:
1438 1438 def bad(f, msg):
1439 1439 # 'f' may be a directory pattern from 'match.files()',
1440 1440 # so 'f not in ctx1' is not enough
1441 1441 if f not in ctx1 and f not in ctx1.dirs():
1442 1442 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1443 1443 match.bad = bad
1444 1444
1445 1445 if working: # we need to scan the working dir
1446 1446 subrepos = []
1447 1447 if '.hgsub' in self.dirstate:
1448 1448 subrepos = ctx2.substate.keys()
1449 1449 s = self.dirstate.status(match, subrepos, listignored,
1450 1450 listclean, listunknown)
1451 1451 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1452 1452
1453 1453 # check for any possibly clean files
1454 1454 if parentworking and cmp:
1455 1455 fixup = []
1456 1456 # do a full compare of any files that might have changed
1457 1457 for f in sorted(cmp):
1458 1458 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1459 1459 or ctx1[f].cmp(ctx2[f])):
1460 1460 modified.append(f)
1461 1461 else:
1462 1462 fixup.append(f)
1463 1463
1464 1464 # update dirstate for files that are actually clean
1465 1465 if fixup:
1466 1466 if listclean:
1467 1467 clean += fixup
1468 1468
1469 1469 try:
1470 1470 # updating the dirstate is optional
1471 1471 # so we don't wait on the lock
1472 1472 wlock = self.wlock(False)
1473 1473 try:
1474 1474 for f in fixup:
1475 1475 self.dirstate.normal(f)
1476 1476 finally:
1477 1477 wlock.release()
1478 1478 except error.LockError:
1479 1479 pass
1480 1480
1481 1481 if not parentworking:
1482 1482 mf1 = mfmatches(ctx1)
1483 1483 if working:
1484 1484 # we are comparing working dir against non-parent
1485 1485 # generate a pseudo-manifest for the working dir
1486 1486 mf2 = mfmatches(self['.'])
1487 1487 for f in cmp + modified + added:
1488 1488 mf2[f] = None
1489 1489 mf2.set(f, ctx2.flags(f))
1490 1490 for f in removed:
1491 1491 if f in mf2:
1492 1492 del mf2[f]
1493 1493 else:
1494 1494 # we are comparing two revisions
1495 1495 deleted, unknown, ignored = [], [], []
1496 1496 mf2 = mfmatches(ctx2)
1497 1497
1498 1498 modified, added, clean = [], [], []
1499 1499 withflags = mf1.withflags() | mf2.withflags()
1500 1500 for fn in mf2:
1501 1501 if fn in mf1:
1502 1502 if (fn not in deleted and
1503 1503 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1504 1504 (mf1[fn] != mf2[fn] and
1505 1505 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1506 1506 modified.append(fn)
1507 1507 elif listclean:
1508 1508 clean.append(fn)
1509 1509 del mf1[fn]
1510 1510 elif fn not in deleted:
1511 1511 added.append(fn)
1512 1512 removed = mf1.keys()
1513 1513
1514 1514 if working and modified and not self.dirstate._checklink:
1515 1515 # Symlink placeholders may get non-symlink-like contents
1516 1516 # via user error or dereferencing by NFS or Samba servers,
1517 1517 # so we filter out any placeholders that don't look like a
1518 1518 # symlink
1519 1519 sane = []
1520 1520 for f in modified:
1521 1521 if ctx2.flags(f) == 'l':
1522 1522 d = ctx2[f].data()
1523 1523 if len(d) >= 1024 or '\n' in d or util.binary(d):
1524 1524 self.ui.debug('ignoring suspect symlink placeholder'
1525 1525 ' "%s"\n' % f)
1526 1526 continue
1527 1527 sane.append(f)
1528 1528 modified = sane
1529 1529
1530 1530 r = modified, added, removed, deleted, unknown, ignored, clean
1531 1531
1532 1532 if listsubrepos:
1533 1533 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1534 1534 if working:
1535 1535 rev2 = None
1536 1536 else:
1537 1537 rev2 = ctx2.substate[subpath][1]
1538 1538 try:
1539 1539 submatch = matchmod.narrowmatcher(subpath, match)
1540 1540 s = sub.status(rev2, match=submatch, ignored=listignored,
1541 1541 clean=listclean, unknown=listunknown,
1542 1542 listsubrepos=True)
1543 1543 for rfiles, sfiles in zip(r, s):
1544 1544 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1545 1545 except error.LookupError:
1546 1546 self.ui.status(_("skipping missing subrepository: %s\n")
1547 1547 % subpath)
1548 1548
1549 1549 for l in r:
1550 1550 l.sort()
1551 1551 return r
1552 1552
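A hedged usage sketch: the seven lists come back in the order assembled above (modified, added, removed, deleted, unknown, ignored, clean); the extra listings are off by default:

    st = repo.status(ignored=True, clean=True, unknown=True)
    modified, added, removed, deleted, unknown, ignored, clean = st
    for f in modified:
        print 'M', f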
1553 1553 def heads(self, start=None):
1554 1554 heads = self.changelog.heads(start)
1555 1555 # sort the output in rev descending order
1556 1556 return sorted(heads, key=self.changelog.rev, reverse=True)
1557 1557
1558 1558 def branchheads(self, branch=None, start=None, closed=False):
1559 1559 '''return a (possibly filtered) list of heads for the given branch
1560 1560
1561 1561 Heads are returned in topological order, from newest to oldest.
1562 1562 If branch is None, use the dirstate branch.
1563 1563 If start is not None, return only heads reachable from start.
1564 1564 If closed is True, return heads that are marked as closed as well.
1565 1565 '''
1566 1566 if branch is None:
1567 1567 branch = self[None].branch()
1568 1568 branches = self.branchmap()
1569 1569 if branch not in branches:
1570 1570 return []
1571 1571 # the cache returns heads ordered lowest to highest
1572 1572 bheads = list(reversed(branches[branch]))
1573 1573 if start is not None:
1574 1574 # filter out the heads that cannot be reached from startrev
1575 1575 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1576 1576 bheads = [h for h in bheads if h in fbheads]
1577 1577 if not closed:
1578 1578 bheads = [h for h in bheads if not self[h].closesbranch()]
1579 1579 return bheads
1580 1580
1581 1581 def branches(self, nodes):
1582 1582 if not nodes:
1583 1583 nodes = [self.changelog.tip()]
1584 1584 b = []
1585 1585 for n in nodes:
1586 1586 t = n
1587 1587 while True:
1588 1588 p = self.changelog.parents(n)
1589 1589 if p[1] != nullid or p[0] == nullid:
1590 1590 b.append((t, n, p[0], p[1]))
1591 1591 break
1592 1592 n = p[0]
1593 1593 return b
1594 1594
1595 1595 def between(self, pairs):
1596 1596 r = []
1597 1597
1598 1598 for top, bottom in pairs:
1599 1599 n, l, i = top, [], 0
1600 1600 f = 1
1601 1601
1602 1602 while n != bottom and n != nullid:
1603 1603 p = self.changelog.parents(n)[0]
1604 1604 if i == f:
1605 1605 l.append(n)
1606 1606 f = f * 2
1607 1607 n = p
1608 1608 i += 1
1609 1609
1610 1610 r.append(l)
1611 1611
1612 1612 return r
1613 1613
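# A standalone sketch of the exponential sampling in between() above:
# it walks first parents from top toward bottom, keeping nodes at
# distances 1, 2, 4, 8, ... 'parents' is a hypothetical node ->
# first-parent mapping; None stands in for nullid.
def sample_between(parents, top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        if i == f:
            l.append(n)
            f = f * 2
        n = parents.get(n)
        i += 1
    return l

history = {4: 3, 3: 2, 2: 1, 1: 0, 0: None}
print sample_between(history, 4, 0)     # -> [3, 2]
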
1614 1614 def pull(self, remote, heads=None, force=False):
1615 1615 # don't open a transaction for nothing or you break future useful
1616 1616 # rollback calls
1617 1617 tr = None
1618 1618 trname = 'pull\n' + util.hidepassword(remote.url())
1619 1619 lock = self.lock()
1620 1620 try:
1621 1621 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1622 1622 force=force)
1623 1623 common, fetch, rheads = tmp
1624 1624 if not fetch:
1625 1625 self.ui.status(_("no changes found\n"))
1626 1626 added = []
1627 1627 result = 0
1628 1628 else:
1629 1629 tr = self.transaction(trname)
1630 1630 if heads is None and list(common) == [nullid]:
1631 1631 self.ui.status(_("requesting all changes\n"))
1632 1632 elif heads is None and remote.capable('changegroupsubset'):
1633 1633 # issue1320, avoid a race if remote changed after discovery
1634 1634 heads = rheads
1635 1635
1636 1636 if remote.capable('getbundle'):
1637 1637 cg = remote.getbundle('pull', common=common,
1638 1638 heads=heads or rheads)
1639 1639 elif heads is None:
1640 1640 cg = remote.changegroup(fetch, 'pull')
1641 1641 elif not remote.capable('changegroupsubset'):
1642 1642 raise util.Abort(_("partial pull cannot be done because "
1643 1643 "other repository doesn't support "
1644 1644 "changegroupsubset."))
1645 1645 else:
1646 1646 cg = remote.changegroupsubset(fetch, heads, 'pull')
1647 1647 clstart = len(self.changelog)
1648 1648 result = self.addchangegroup(cg, 'pull', remote.url())
1649 1649 clend = len(self.changelog)
1650 1650 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1651 1651
1652 1652 # compute target subset
1653 1653 if heads is None:
1654 1654 # We pulled everything possible
1655 1655 # sync on everything common
1656 1656 subset = common + added
1657 1657 else:
1658 1658 # We pulled a specific subset
1659 1659 # sync on this subset
1660 1660 subset = heads
1661 1661
1662 1662 # Get remote phases data from remote
1663 1663 remotephases = remote.listkeys('phases')
1664 1664 publishing = bool(remotephases.get('publishing', False))
1665 1665 if remotephases and not publishing:
1666 1666 # remote is new and non-publishing
1667 1667 pheads, _dr = phases.analyzeremotephases(self, subset,
1668 1668 remotephases)
1669 1669 phases.advanceboundary(self, phases.public, pheads)
1670 1670 phases.advanceboundary(self, phases.draft, subset)
1671 1671 else:
1672 1672 # Remote is old or publishing; all common changesets
1673 1673 # should be seen as public
1674 1674 phases.advanceboundary(self, phases.public, subset)
1675 1675
1676 1676 remoteobs = remote.listkeys('obsolete')
1677 1677 if 'dump' in remoteobs:
1678 1678 if tr is None:
1679 1679 tr = self.transaction(trname)
1680 1680 data = base85.b85decode(remoteobs['dump'])
1681 1681 self.obsstore.mergemarkers(tr, data)
1682 1682 if tr is not None:
1683 1683 tr.close()
1684 1684 finally:
1685 1685 if tr is not None:
1686 1686 tr.release()
1687 1687 lock.release()
1688 1688
1689 1689 return result
1690 1690
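# A hedged sketch of driving pull() from the API, assuming a Mercurial
# checkout of this era on the import path; the repository path and the
# remote URL below are hypothetical, and hg.peer() as the entry point
# for remotes is an assumption about this era's API.
from mercurial import ui as uimod, hg

u = uimod.ui()
repo = hg.repository(u, '/tmp/demo')
other = hg.peer(u, {}, 'http://hg.example.com/repo')
print 'pull returned', repo.pull(other)     # heads=None pulls everything
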
1691 1691 def checkpush(self, force, revs):
1692 1692 """Extensions can override this function if additional checks have
1693 1693 to be performed before pushing, or call it if they override push
1694 1694 command.
1695 1695 """
1696 1696 pass
1697 1697
1698 1698 def push(self, remote, force=False, revs=None, newbranch=False):
1699 1699 '''Push outgoing changesets (limited by revs) from the current
1700 1700 repository to remote. Return an integer:
1701 1701 - None means nothing to push
1702 1702 - 0 means HTTP error
1703 1703 - 1 means we pushed and remote head count is unchanged *or*
1704 1704 we have outgoing changesets but refused to push
1705 1705 - other values as described by addchangegroup()
1706 1706 '''
1707 1707 # there are two ways to push to remote repo:
1708 1708 #
1709 1709 # addchangegroup assumes local user can lock remote
1710 1710 # repo (local filesystem, old ssh servers).
1711 1711 #
1712 1712 # unbundle assumes local user cannot lock remote repo (new ssh
1713 1713 # servers, http servers).
1714 1714
1715 1715 # get local lock as we might write phase data
1716 1716 locallock = self.lock()
1717 1717 try:
1718 1718 self.checkpush(force, revs)
1719 1719 lock = None
1720 1720 unbundle = remote.capable('unbundle')
1721 1721 if not unbundle:
1722 1722 lock = remote.lock()
1723 1723 try:
1724 1724 # discovery
1725 1725 fci = discovery.findcommonincoming
1726 1726 commoninc = fci(self, remote, force=force)
1727 1727 common, inc, remoteheads = commoninc
1728 1728 fco = discovery.findcommonoutgoing
1729 1729 outgoing = fco(self, remote, onlyheads=revs,
1730 1730 commoninc=commoninc, force=force)
1731 1731
1732 1732
1733 1733 if not outgoing.missing:
1734 1734 # nothing to push
1735 1735 scmutil.nochangesfound(self.ui, outgoing.excluded)
1736 1736 ret = None
1737 1737 else:
1738 1738 # something to push
1739 1739 if not force:
1740 1740 discovery.checkheads(self, remote, outgoing,
1741 1741 remoteheads, newbranch,
1742 1742 bool(inc))
1743 1743
1744 1744 # create a changegroup from local
1745 1745 if revs is None and not outgoing.excluded:
1746 1746 # push everything,
1747 1747 # use the fast path, no race possible on push
1748 1748 cg = self._changegroup(outgoing.missing, 'push')
1749 1749 else:
1750 1750 cg = self.getlocalbundle('push', outgoing)
1751 1751
1752 1752 # apply changegroup to remote
1753 1753 if unbundle:
1754 1754 # local repo finds heads on server, finds out what
1755 1755 # revs it must push. once revs transferred, if server
1756 1756 # finds it has different heads (someone else won
1757 1757 # commit/push race), server aborts.
1758 1758 if force:
1759 1759 remoteheads = ['force']
1760 1760 # ssh: return remote's addchangegroup()
1761 1761 # http: return remote's addchangegroup() or 0 for error
1762 1762 ret = remote.unbundle(cg, remoteheads, 'push')
1763 1763 else:
1764 1764 # we return an integer indicating remote head count
1765 1765 # change
1766 1766 ret = remote.addchangegroup(cg, 'push', self.url())
1767 1767
1768 1768 if ret:
1769 1769 # push succeeded, synchronize the target of the push
1770 1770 cheads = outgoing.missingheads
1771 1771 elif revs is None:
1772 1772 # All-out push failed. synchronize all common
1773 1773 cheads = outgoing.commonheads
1774 1774 else:
1775 1775 # I want cheads = heads(::missingheads and ::commonheads)
1776 1776 # (missingheads is revs with secret changeset filtered out)
1777 1777 #
1778 1778 # This can be expressed as:
1779 1779 # cheads = ( (missingheads and ::commonheads)
1780 1780 # + (commonheads and ::missingheads))"
1781 1781 # )
1782 1782 #
1783 1783 # while trying to push we already computed the following:
1784 1784 # common = (::commonheads)
1785 1785 # missing = ((commonheads::missingheads) - commonheads)
1786 1786 #
1787 1787 # We can pick:
1788 1788 # * missingheads part of common (::commonheads)
1789 1789 common = set(outgoing.common)
1790 1790 cheads = [node for node in revs if node in common]
1791 1791 # and
1792 1792 # * commonheads parents on missing
1793 1793 revset = self.set('%ln and parents(roots(%ln))',
1794 1794 outgoing.commonheads,
1795 1795 outgoing.missing)
1796 1796 cheads.extend(c.node() for c in revset)
1797 1797 # even when we don't push, exchanging phase data is useful
1798 1798 remotephases = remote.listkeys('phases')
1799 1799 if not remotephases: # old server or public only repo
1800 1800 phases.advanceboundary(self, phases.public, cheads)
1801 1801 # don't push any phase data as there is nothing to push
1802 1802 else:
1803 1803 ana = phases.analyzeremotephases(self, cheads, remotephases)
1804 1804 pheads, droots = ana
1805 1805 ### Apply remote phase on local
1806 1806 if remotephases.get('publishing', False):
1807 1807 phases.advanceboundary(self, phases.public, cheads)
1808 1808 else: # publish = False
1809 1809 phases.advanceboundary(self, phases.public, pheads)
1810 1810 phases.advanceboundary(self, phases.draft, cheads)
1811 1811 ### Apply local phase on remote
1812 1812
1813 1813 # Get the list of all revs draft on remote but public here.
1814 1814 # XXX Beware that the revset breaks if droots is not strictly
1815 1815 # XXX roots; we may want to ensure it is, but that is costly
1816 1816 outdated = self.set('heads((%ln::%ln) and public())',
1817 1817 droots, cheads)
1818 1818 for newremotehead in outdated:
1819 1819 r = remote.pushkey('phases',
1820 1820 newremotehead.hex(),
1821 1821 str(phases.draft),
1822 1822 str(phases.public))
1823 1823 if not r:
1824 1824 self.ui.warn(_('updating %s to public failed!\n')
1825 1825 % newremotehead)
1826 1826 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1827 1827 data = self.listkeys('obsolete')['dump']
1828 1828 r = remote.pushkey('obsolete', 'dump', '', data)
1829 1829 if not r:
1830 1830 self.ui.warn(_('failed to push obsolete markers!\n'))
1831 1831 finally:
1832 1832 if lock is not None:
1833 1833 lock.release()
1834 1834 finally:
1835 1835 locallock.release()
1836 1836
1837 1837 self.ui.debug("checking for updated bookmarks\n")
1838 1838 rb = remote.listkeys('bookmarks')
1839 1839 for k in rb.keys():
1840 1840 if k in self._bookmarks:
1841 1841 nr, nl = rb[k], hex(self._bookmarks[k])
1842 1842 if nr in self:
1843 1843 cr = self[nr]
1844 1844 cl = self[nl]
1845 1845 if cl in cr.descendants():
1846 1846 r = remote.pushkey('bookmarks', k, nr, nl)
1847 1847 if r:
1848 1848 self.ui.status(_("updating bookmark %s\n") % k)
1849 1849 else:
1850 1850 self.ui.warn(_('updating bookmark %s'
1851 1851 ' failed!\n') % k)
1852 1852
1853 1853 return ret
1854 1854
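# Sketch: decoding push()'s documented return values on the caller's
# side (a plain illustration of the docstring above, not repo code).
def describe_push(ret):
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'push failed (HTTP error)'
    if ret == 1:
        return 'pushed, remote head count unchanged (or push refused)'
    return 'pushed, head count change encoded as %d' % ret

for ret in (None, 0, 1, 3):
    print ret, '->', describe_push(ret)
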
1855 1855 def changegroupinfo(self, nodes, source):
1856 1856 if self.ui.verbose or source == 'bundle':
1857 1857 self.ui.status(_("%d changesets found\n") % len(nodes))
1858 1858 if self.ui.debugflag:
1859 1859 self.ui.debug("list of changesets:\n")
1860 1860 for node in nodes:
1861 1861 self.ui.debug("%s\n" % hex(node))
1862 1862
1863 1863 def changegroupsubset(self, bases, heads, source):
1864 1864 """Compute a changegroup consisting of all the nodes that are
1865 1865 descendants of any of the bases and ancestors of any of the heads.
1866 1866 Return a chunkbuffer object whose read() method will return
1867 1867 successive changegroup chunks.
1868 1868
1869 1869 It is fairly complex as determining which filenodes and which
1870 1870 manifest nodes need to be included for the changeset to be complete
1871 1871 is non-trivial.
1872 1872
1873 1873 Another wrinkle is doing the reverse, figuring out which changeset in
1874 1874 the changegroup a particular filenode or manifestnode belongs to.
1875 1875 """
1876 1876 cl = self.changelog
1877 1877 if not bases:
1878 1878 bases = [nullid]
1879 1879 csets, bases, heads = cl.nodesbetween(bases, heads)
1880 1880 # We assume that all ancestors of bases are known
1881 1881 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1882 1882 return self._changegroupsubset(common, csets, heads, source)
1883 1883
1884 1884 def getlocalbundle(self, source, outgoing):
1885 1885 """Like getbundle, but taking a discovery.outgoing as an argument.
1886 1886
1887 1887 This is only implemented for local repos and reuses potentially
1888 1888 precomputed sets in outgoing."""
1889 1889 if not outgoing.missing:
1890 1890 return None
1891 1891 return self._changegroupsubset(outgoing.common,
1892 1892 outgoing.missing,
1893 1893 outgoing.missingheads,
1894 1894 source)
1895 1895
1896 1896 def getbundle(self, source, heads=None, common=None):
1897 1897 """Like changegroupsubset, but returns the set difference between the
1898 1898 ancestors of heads and the ancestors common.
1899 1899
1900 1900 If heads is None, use the local heads. If common is None, use [nullid].
1901 1901
1902 1902 The nodes in common might not all be known locally due to the way the
1903 1903 current discovery protocol works.
1904 1904 """
1905 1905 cl = self.changelog
1906 1906 if common:
1907 1907 nm = cl.nodemap
1908 1908 common = [n for n in common if n in nm]
1909 1909 else:
1910 1910 common = [nullid]
1911 1911 if not heads:
1912 1912 heads = cl.heads()
1913 1913 return self.getlocalbundle(source,
1914 1914 discovery.outgoing(cl, common, heads))
1915 1915
1916 1916 def _changegroupsubset(self, commonrevs, csets, heads, source):
1917 1917
1918 1918 cl = self.changelog
1919 1919 mf = self.manifest
1920 1920 mfs = {} # needed manifests
1921 1921 fnodes = {} # needed file nodes
1922 1922 changedfiles = set()
1923 1923 fstate = ['', {}]
1924 1924 count = [0, 0]
1925 1925
1926 1926 # can we go through the fast path ?
1927 1927 heads.sort()
1928 1928 if heads == sorted(self.heads()):
1929 1929 return self._changegroup(csets, source)
1930 1930
1931 1931 # slow path
1932 1932 self.hook('preoutgoing', throw=True, source=source)
1933 1933 self.changegroupinfo(csets, source)
1934 1934
1935 1935 # filter any nodes that claim to be part of the known set
1936 1936 def prune(revlog, missing):
1937 1937 rr, rl = revlog.rev, revlog.linkrev
1938 1938 return [n for n in missing
1939 1939 if rl(rr(n)) not in commonrevs]
1940 1940
1941 1941 progress = self.ui.progress
1942 1942 _bundling = _('bundling')
1943 1943 _changesets = _('changesets')
1944 1944 _manifests = _('manifests')
1945 1945 _files = _('files')
1946 1946
1947 1947 def lookup(revlog, x):
1948 1948 if revlog == cl:
1949 1949 c = cl.read(x)
1950 1950 changedfiles.update(c[3])
1951 1951 mfs.setdefault(c[0], x)
1952 1952 count[0] += 1
1953 1953 progress(_bundling, count[0],
1954 1954 unit=_changesets, total=count[1])
1955 1955 return x
1956 1956 elif revlog == mf:
1957 1957 clnode = mfs[x]
1958 1958 mdata = mf.readfast(x)
1959 1959 for f, n in mdata.iteritems():
1960 1960 if f in changedfiles:
1961 1961 fnodes[f].setdefault(n, clnode)
1962 1962 count[0] += 1
1963 1963 progress(_bundling, count[0],
1964 1964 unit=_manifests, total=count[1])
1965 1965 return clnode
1966 1966 else:
1967 1967 progress(_bundling, count[0], item=fstate[0],
1968 1968 unit=_files, total=count[1])
1969 1969 return fstate[1][x]
1970 1970
1971 1971 bundler = changegroup.bundle10(lookup)
1972 1972 reorder = self.ui.config('bundle', 'reorder', 'auto')
1973 1973 if reorder == 'auto':
1974 1974 reorder = None
1975 1975 else:
1976 1976 reorder = util.parsebool(reorder)
1977 1977
1978 1978 def gengroup():
1979 1979 # Create a changenode group generator that will call our functions
1980 1980 # back to lookup the owning changenode and collect information.
1981 1981 count[:] = [0, len(csets)]
1982 1982 for chunk in cl.group(csets, bundler, reorder=reorder):
1983 1983 yield chunk
1984 1984 progress(_bundling, None)
1985 1985
1986 1986 # Create a generator for the manifestnodes that calls our lookup
1987 1987 # and data collection functions back.
1988 1988 for f in changedfiles:
1989 1989 fnodes[f] = {}
1990 1990 count[:] = [0, len(mfs)]
1991 1991 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1992 1992 yield chunk
1993 1993 progress(_bundling, None)
1994 1994
1995 1995 mfs.clear()
1996 1996
1997 1997 # Go through all our files in order sorted by name.
1998 1998 count[:] = [0, len(changedfiles)]
1999 1999 for fname in sorted(changedfiles):
2000 2000 filerevlog = self.file(fname)
2001 2001 if not len(filerevlog):
2002 2002 raise util.Abort(_("empty or missing revlog for %s")
2003 2003 % fname)
2004 2004 fstate[0] = fname
2005 2005 fstate[1] = fnodes.pop(fname, {})
2006 2006
2007 2007 nodelist = prune(filerevlog, fstate[1])
2008 2008 if nodelist:
2009 2009 count[0] += 1
2010 2010 yield bundler.fileheader(fname)
2011 2011 for chunk in filerevlog.group(nodelist, bundler, reorder):
2012 2012 yield chunk
2013 2013
2014 2014 # Signal that no more groups are left.
2015 2015 yield bundler.close()
2016 2016 progress(_bundling, None)
2017 2017
2018 2018 if csets:
2019 2019 self.hook('outgoing', node=hex(csets[0]), source=source)
2020 2020
2021 2021 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2022 2022
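# Standalone sketch of the prune() helper above: drop any node whose
# linkrev already falls inside the common set. 'linkrevs' is a
# hypothetical node -> linkrev mapping.
def prune(linkrevs, missing, commonrevs):
    return [n for n in missing if linkrevs[n] not in commonrevs]

linkrevs = {'n1': 2, 'n2': 5, 'n3': 7}
print prune(linkrevs, ['n1', 'n2', 'n3'], set([0, 1, 2]))   # -> ['n2', 'n3']
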
2023 2023 def changegroup(self, basenodes, source):
2024 2024 # to avoid a race we use changegroupsubset() (issue1320)
2025 2025 return self.changegroupsubset(basenodes, self.heads(), source)
2026 2026
2027 2027 def _changegroup(self, nodes, source):
2028 2028 """Compute the changegroup of all nodes that we have that a recipient
2029 2029 doesn't. Return a chunkbuffer object whose read() method will return
2030 2030 successive changegroup chunks.
2031 2031
2032 2032 This is much easier than the previous function as we can assume that
2033 2033 the recipient has any changenode we aren't sending them.
2034 2034
2035 2035 nodes is the set of nodes to send"""
2036 2036
2037 2037 cl = self.changelog
2038 2038 mf = self.manifest
2039 2039 mfs = {}
2040 2040 changedfiles = set()
2041 2041 fstate = ['']
2042 2042 count = [0, 0]
2043 2043
2044 2044 self.hook('preoutgoing', throw=True, source=source)
2045 2045 self.changegroupinfo(nodes, source)
2046 2046
2047 2047 revset = set([cl.rev(n) for n in nodes])
2048 2048
2049 2049 def gennodelst(log):
2050 2050 ln, llr = log.node, log.linkrev
2051 2051 return [ln(r) for r in log if llr(r) in revset]
2052 2052
2053 2053 progress = self.ui.progress
2054 2054 _bundling = _('bundling')
2055 2055 _changesets = _('changesets')
2056 2056 _manifests = _('manifests')
2057 2057 _files = _('files')
2058 2058
2059 2059 def lookup(revlog, x):
2060 2060 if revlog == cl:
2061 2061 c = cl.read(x)
2062 2062 changedfiles.update(c[3])
2063 2063 mfs.setdefault(c[0], x)
2064 2064 count[0] += 1
2065 2065 progress(_bundling, count[0],
2066 2066 unit=_changesets, total=count[1])
2067 2067 return x
2068 2068 elif revlog == mf:
2069 2069 count[0] += 1
2070 2070 progress(_bundling, count[0],
2071 2071 unit=_manifests, total=count[1])
2072 2072 return cl.node(revlog.linkrev(revlog.rev(x)))
2073 2073 else:
2074 2074 progress(_bundling, count[0], item=fstate[0],
2075 2075 total=count[1], unit=_files)
2076 2076 return cl.node(revlog.linkrev(revlog.rev(x)))
2077 2077
2078 2078 bundler = changegroup.bundle10(lookup)
2079 2079 reorder = self.ui.config('bundle', 'reorder', 'auto')
2080 2080 if reorder == 'auto':
2081 2081 reorder = None
2082 2082 else:
2083 2083 reorder = util.parsebool(reorder)
2084 2084
2085 2085 def gengroup():
2086 2086 '''yield a sequence of changegroup chunks (strings)'''
2087 2087 # construct a list of all changed files
2088 2088
2089 2089 count[:] = [0, len(nodes)]
2090 2090 for chunk in cl.group(nodes, bundler, reorder=reorder):
2091 2091 yield chunk
2092 2092 progress(_bundling, None)
2093 2093
2094 2094 count[:] = [0, len(mfs)]
2095 2095 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2096 2096 yield chunk
2097 2097 progress(_bundling, None)
2098 2098
2099 2099 count[:] = [0, len(changedfiles)]
2100 2100 for fname in sorted(changedfiles):
2101 2101 filerevlog = self.file(fname)
2102 2102 if not len(filerevlog):
2103 2103 raise util.Abort(_("empty or missing revlog for %s")
2104 2104 % fname)
2105 2105 fstate[0] = fname
2106 2106 nodelist = gennodelst(filerevlog)
2107 2107 if nodelist:
2108 2108 count[0] += 1
2109 2109 yield bundler.fileheader(fname)
2110 2110 for chunk in filerevlog.group(nodelist, bundler, reorder):
2111 2111 yield chunk
2112 2112 yield bundler.close()
2113 2113 progress(_bundling, None)
2114 2114
2115 2115 if nodes:
2116 2116 self.hook('outgoing', node=hex(nodes[0]), source=source)
2117 2117
2118 2118 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2119 2119
2120 2120 def addchangegroup(self, source, srctype, url, emptyok=False):
2121 2121 """Add the changegroup returned by source.read() to this repo.
2122 2122 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2123 2123 the URL of the repo where this changegroup is coming from.
2124 2124
2125 2125 Return an integer summarizing the change to this repo:
2126 2126 - nothing changed or no source: 0
2127 2127 - more heads than before: 1+added heads (2..n)
2128 2128 - fewer heads than before: -1-removed heads (-2..-n)
2129 2129 - number of heads stays the same: 1
2130 2130 """
2131 2131 def csmap(x):
2132 2132 self.ui.debug("add changeset %s\n" % short(x))
2133 2133 return len(cl)
2134 2134
2135 2135 def revmap(x):
2136 2136 return cl.rev(x)
2137 2137
2138 2138 if not source:
2139 2139 return 0
2140 2140
2141 2141 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2142 2142
2143 2143 changesets = files = revisions = 0
2144 2144 efiles = set()
2145 2145
2146 2146 # write changelog data to temp files so concurrent readers will not see
2147 2147 # an inconsistent view
2148 2148 cl = self.changelog
2149 2149 cl.delayupdate()
2150 2150 oldheads = cl.heads()
2151 2151
2152 2152 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2153 2153 try:
2154 2154 trp = weakref.proxy(tr)
2155 2155 # pull off the changeset group
2156 2156 self.ui.status(_("adding changesets\n"))
2157 2157 clstart = len(cl)
2158 2158 class prog(object):
2159 2159 step = _('changesets')
2160 2160 count = 1
2161 2161 ui = self.ui
2162 2162 total = None
2163 2163 def __call__(self):
2164 2164 self.ui.progress(self.step, self.count, unit=_('chunks'),
2165 2165 total=self.total)
2166 2166 self.count += 1
2167 2167 pr = prog()
2168 2168 source.callback = pr
2169 2169
2170 2170 source.changelogheader()
2171 2171 srccontent = cl.addgroup(source, csmap, trp)
2172 2172 if not (srccontent or emptyok):
2173 2173 raise util.Abort(_("received changelog group is empty"))
2174 2174 clend = len(cl)
2175 2175 changesets = clend - clstart
2176 2176 for c in xrange(clstart, clend):
2177 2177 efiles.update(self[c].files())
2178 2178 efiles = len(efiles)
2179 2179 self.ui.progress(_('changesets'), None)
2180 2180
2181 2181 # pull off the manifest group
2182 2182 self.ui.status(_("adding manifests\n"))
2183 2183 pr.step = _('manifests')
2184 2184 pr.count = 1
2185 2185 pr.total = changesets # manifests <= changesets
2186 2186 # no need to check for empty manifest group here:
2187 2187 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2188 2188 # no new manifest will be created and the manifest group will
2189 2189 # be empty during the pull
2190 2190 source.manifestheader()
2191 2191 self.manifest.addgroup(source, revmap, trp)
2192 2192 self.ui.progress(_('manifests'), None)
2193 2193
2194 2194 needfiles = {}
2195 2195 if self.ui.configbool('server', 'validate', default=False):
2196 2196 # validate incoming csets have their manifests
2197 2197 for cset in xrange(clstart, clend):
2198 2198 mfest = self.changelog.read(self.changelog.node(cset))[0]
2199 2199 mfest = self.manifest.readdelta(mfest)
2200 2200 # store file nodes we must see
2201 2201 for f, n in mfest.iteritems():
2202 2202 needfiles.setdefault(f, set()).add(n)
2203 2203
2204 2204 # process the files
2205 2205 self.ui.status(_("adding file changes\n"))
2206 2206 pr.step = _('files')
2207 2207 pr.count = 1
2208 2208 pr.total = efiles
2209 2209 source.callback = None
2210 2210
2211 2211 while True:
2212 2212 chunkdata = source.filelogheader()
2213 2213 if not chunkdata:
2214 2214 break
2215 2215 f = chunkdata["filename"]
2216 2216 self.ui.debug("adding %s revisions\n" % f)
2217 2217 pr()
2218 2218 fl = self.file(f)
2219 2219 o = len(fl)
2220 2220 if not fl.addgroup(source, revmap, trp):
2221 2221 raise util.Abort(_("received file revlog group is empty"))
2222 2222 revisions += len(fl) - o
2223 2223 files += 1
2224 2224 if f in needfiles:
2225 2225 needs = needfiles[f]
2226 2226 for new in xrange(o, len(fl)):
2227 2227 n = fl.node(new)
2228 2228 if n in needs:
2229 2229 needs.remove(n)
2230 2230 if not needs:
2231 2231 del needfiles[f]
2232 2232 self.ui.progress(_('files'), None)
2233 2233
2234 2234 for f, needs in needfiles.iteritems():
2235 2235 fl = self.file(f)
2236 2236 for n in needs:
2237 2237 try:
2238 2238 fl.rev(n)
2239 2239 except error.LookupError:
2240 2240 raise util.Abort(
2241 2241 _('missing file data for %s:%s - run hg verify') %
2242 2242 (f, hex(n)))
2243 2243
2244 2244 dh = 0
2245 2245 if oldheads:
2246 2246 heads = cl.heads()
2247 2247 dh = len(heads) - len(oldheads)
2248 2248 for h in heads:
2249 2249 if h not in oldheads and self[h].closesbranch():
2250 2250 dh -= 1
2251 2251 htext = ""
2252 2252 if dh:
2253 2253 htext = _(" (%+d heads)") % dh
2254 2254
2255 2255 self.ui.status(_("added %d changesets"
2256 2256 " with %d changes to %d files%s\n")
2257 2257 % (changesets, revisions, files, htext))
2258 2258
2259 2259 if changesets > 0:
2260 2260 p = lambda: cl.writepending() and self.root or ""
2261 2261 self.hook('pretxnchangegroup', throw=True,
2262 2262 node=hex(cl.node(clstart)), source=srctype,
2263 2263 url=url, pending=p)
2264 2264
2265 2265 added = [cl.node(r) for r in xrange(clstart, clend)]
2266 2266 publishing = self.ui.configbool('phases', 'publish', True)
2267 2267 if srctype == 'push':
2268 2268 # Old servers can not push the boundary themselves.
2269 2269 # New servers won't push the boundary if the changeset already
2270 2270 # existed locally as secret
2271 2271 #
2272 2272 # We should not use added here but the list of all changes in
2273 2273 # the bundle
2274 2274 if publishing:
2275 2275 phases.advanceboundary(self, phases.public, srccontent)
2276 2276 else:
2277 2277 phases.advanceboundary(self, phases.draft, srccontent)
2278 2278 phases.retractboundary(self, phases.draft, added)
2279 2279 elif srctype != 'strip':
2280 2280 # publishing only alters behavior during push
2281 2281 #
2282 2282 # strip should not touch boundary at all
2283 2283 phases.retractboundary(self, phases.draft, added)
2284 2284
2285 2285 # make changelog see real files again
2286 2286 cl.finalize(trp)
2287 2287
2288 2288 tr.close()
2289 2289
2290 2290 if changesets > 0:
2291 2291 def runhooks():
2292 2292 # forcefully update the on-disk branch cache
2293 2293 self.ui.debug("updating the branch cache\n")
2294 2294 self.updatebranchcache()
2295 2295 self.hook("changegroup", node=hex(cl.node(clstart)),
2296 2296 source=srctype, url=url)
2297 2297
2298 2298 for n in added:
2299 2299 self.hook("incoming", node=hex(n), source=srctype,
2300 2300 url=url)
2301 2301 self._afterlock(runhooks)
2302 2302
2303 2303 finally:
2304 2304 tr.release()
2305 2305 # never return 0 here:
2306 2306 if dh < 0:
2307 2307 return dh - 1
2308 2308 else:
2309 2309 return dh + 1
2310 2310
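# Sketch: inverting the "never return 0" encoding above. The return
# value shifts the head-count delta away from zero so 0 can only mean
# "nothing changed".
def decode_headdelta(ret):
    if ret == 0:
        return None
    return ret + 1 if ret < 0 else ret - 1

for ret in (1, 3, -2):
    print ret, '->', decode_headdelta(ret)      # -> 0, 2, -1
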
2311 2311 def stream_in(self, remote, requirements):
2312 2312 lock = self.lock()
2313 2313 try:
2314 2314 fp = remote.stream_out()
2315 2315 l = fp.readline()
2316 2316 try:
2317 2317 resp = int(l)
2318 2318 except ValueError:
2319 2319 raise error.ResponseError(
2320 2320 _('unexpected response from remote server:'), l)
2321 2321 if resp == 1:
2322 2322 raise util.Abort(_('operation forbidden by server'))
2323 2323 elif resp == 2:
2324 2324 raise util.Abort(_('locking the remote repository failed'))
2325 2325 elif resp != 0:
2326 2326 raise util.Abort(_('the server sent an unknown error code'))
2327 2327 self.ui.status(_('streaming all changes\n'))
2328 2328 l = fp.readline()
2329 2329 try:
2330 2330 total_files, total_bytes = map(int, l.split(' ', 1))
2331 2331 except (ValueError, TypeError):
2332 2332 raise error.ResponseError(
2333 2333 _('unexpected response from remote server:'), l)
2334 2334 self.ui.status(_('%d files to transfer, %s of data\n') %
2335 2335 (total_files, util.bytecount(total_bytes)))
2336 2336 handled_bytes = 0
2337 2337 self.ui.progress(_('clone'), 0, total=total_bytes)
2338 2338 start = time.time()
2339 2339 for i in xrange(total_files):
2340 2340 # XXX doesn't support '\n' or '\r' in filenames
2341 2341 l = fp.readline()
2342 2342 try:
2343 2343 name, size = l.split('\0', 1)
2344 2344 size = int(size)
2345 2345 except (ValueError, TypeError):
2346 2346 raise error.ResponseError(
2347 2347 _('unexpected response from remote server:'), l)
2348 2348 if self.ui.debugflag:
2349 2349 self.ui.debug('adding %s (%s)\n' %
2350 2350 (name, util.bytecount(size)))
2351 2351 # for backwards compat, name was partially encoded
2352 2352 ofp = self.sopener(store.decodedir(name), 'w')
2353 2353 for chunk in util.filechunkiter(fp, limit=size):
2354 2354 handled_bytes += len(chunk)
2355 2355 self.ui.progress(_('clone'), handled_bytes,
2356 2356 total=total_bytes)
2357 2357 ofp.write(chunk)
2358 2358 ofp.close()
2359 2359 elapsed = time.time() - start
2360 2360 if elapsed <= 0:
2361 2361 elapsed = 0.001
2362 2362 self.ui.progress(_('clone'), None)
2363 2363 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2364 2364 (util.bytecount(total_bytes), elapsed,
2365 2365 util.bytecount(total_bytes / elapsed)))
2366 2366
2367 2367 # new requirements = old non-format requirements +
2368 2368 # new format-related
2369 2369 # requirements from the streamed-in repository
2370 2370 requirements.update(set(self.requirements) - self.supportedformats)
2371 2371 self._applyrequirements(requirements)
2372 2372 self._writerequirements()
2373 2373
2374 2374 self.invalidate()
2375 2375 return len(self.heads()) + 1
2376 2376 finally:
2377 2377 lock.release()
2378 2378
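# Standalone sketch of the stream_out framing parsed by stream_in()
# above: a response-code line, a "<files> <bytes>" line, then per file
# "<name>\0<size>" followed by exactly <size> bytes of content.
import StringIO

def parse_stream(fp):
    resp = int(fp.readline())
    assert resp == 0, 'server refused streaming'
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))

buf = StringIO.StringIO('0\n1 5\nfoo\x005\nhello')
print list(parse_stream(buf))       # -> [('foo', 'hello')]
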
2379 2379 def clone(self, remote, heads=[], stream=False):
2380 2380 '''clone remote repository.
2381 2381
2382 2382 keyword arguments:
2383 2383 heads: list of revs to clone (forces use of pull)
2384 2384 stream: use streaming clone if possible'''
2385 2385
2386 2386 # now, all clients that can request uncompressed clones can
2387 2387 # read repo formats supported by all servers that can serve
2388 2388 # them.
2389 2389
2390 2390 # if revlog format changes, client will have to check version
2391 2391 # and format flags on "stream" capability, and use
2392 2392 # uncompressed only if compatible.
2393 2393
2394 2394 if not stream:
2395 2395 # if the server explicitly prefers to stream (for fast LANs)
2396 2396 stream = remote.capable('stream-preferred')
2397 2397
2398 2398 if stream and not heads:
2399 2399 # 'stream' means remote revlog format is revlogv1 only
2400 2400 if remote.capable('stream'):
2401 2401 return self.stream_in(remote, set(('revlogv1',)))
2402 2402 # otherwise, 'streamreqs' contains the remote revlog format
2403 2403 streamreqs = remote.capable('streamreqs')
2404 2404 if streamreqs:
2405 2405 streamreqs = set(streamreqs.split(','))
2406 2406 # if we support it, stream in and adjust our requirements
2407 2407 if not streamreqs - self.supportedformats:
2408 2408 return self.stream_in(remote, streamreqs)
2409 2409 return self.pull(remote, heads)
2410 2410
2411 2411 def pushkey(self, namespace, key, old, new):
2412 2412 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2413 2413 old=old, new=new)
2414 2414 ret = pushkey.push(self, namespace, key, old, new)
2415 2415 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2416 2416 ret=ret)
2417 2417 return ret
2418 2418
2419 2419 def listkeys(self, namespace):
2420 2420 self.hook('prelistkeys', throw=True, namespace=namespace)
2421 2421 values = pushkey.list(self, namespace)
2422 2422 self.hook('listkeys', namespace=namespace, values=values)
2423 2423 return values
2424 2424
2425 2425 def debugwireargs(self, one, two, three=None, four=None, five=None):
2426 2426 '''used to test argument passing over the wire'''
2427 2427 return "%s %s %s %s %s" % (one, two, three, four, five)
2428 2428
2429 2429 def savecommitmessage(self, text):
2430 2430 fp = self.opener('last-message.txt', 'wb')
2431 2431 try:
2432 2432 fp.write(text)
2433 2433 finally:
2434 2434 fp.close()
2435 2435 return self.pathto(fp.name[len(self.root)+1:])
2436 2436
2437 2437 # used to avoid circular references so destructors work
2438 2438 def aftertrans(files):
2439 2439 renamefiles = [tuple(t) for t in files]
2440 2440 def a():
2441 2441 for src, dest in renamefiles:
2442 2442 try:
2443 2443 util.rename(src, dest)
2444 2444 except OSError: # journal file does not yet exist
2445 2445 pass
2446 2446 return a
2447 2447
2448 2448 def undoname(fn):
2449 2449 base, name = os.path.split(fn)
2450 2450 assert name.startswith('journal')
2451 2451 return os.path.join(base, name.replace('journal', 'undo', 1))
2452 2452
2453 2453 def instance(ui, path, create):
2454 2454 return localrepository(ui, util.urllocalpath(path), create)
2455 2455
2456 2456 def islocal(path):
2457 2457 return True
@@ -1,889 +1,907 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
13 13 def nochangesfound(ui, secretlist=None):
14 14 '''report no changes for push/pull'''
15 15 if secretlist:
16 16 ui.status(_("no changes found (ignored %d secret changesets)\n")
17 17 % len(secretlist))
18 18 else:
19 19 ui.status(_("no changes found\n"))
20 20
21 21 def checkfilename(f):
22 22 '''Check that the filename f is an acceptable filename for a tracked file'''
23 23 if '\r' in f or '\n' in f:
24 24 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
25 25
26 26 def checkportable(ui, f):
27 27 '''Check if filename f is portable and warn or abort depending on config'''
28 28 checkfilename(f)
29 29 abort, warn = checkportabilityalert(ui)
30 30 if abort or warn:
31 31 msg = util.checkwinfilename(f)
32 32 if msg:
33 33 msg = "%s: %r" % (msg, f)
34 34 if abort:
35 35 raise util.Abort(msg)
36 36 ui.warn(_("warning: %s\n") % msg)
37 37
38 38 def checkportabilityalert(ui):
39 39 '''check if the user's config requests nothing, a warning, or abort for
40 40 non-portable filenames'''
41 41 val = ui.config('ui', 'portablefilenames', 'warn')
42 42 lval = val.lower()
43 43 bval = util.parsebool(val)
44 44 abort = os.name == 'nt' or lval == 'abort'
45 45 warn = bval or lval == 'warn'
46 46 if bval is None and not (warn or abort or lval == 'ignore'):
47 47 raise error.ConfigError(
48 48 _("ui.portablefilenames value is invalid ('%s')") % val)
49 49 return abort, warn
50 50
51 51 class casecollisionauditor(object):
52 52 def __init__(self, ui, abort, existingiter):
53 53 self._ui = ui
54 54 self._abort = abort
55 55 self._map = {}
56 56 for f in existingiter:
57 57 self._map[encoding.lower(f)] = f
58 58
59 59 def __call__(self, f):
60 60 fl = encoding.lower(f)
61 61 map = self._map
62 62 if fl in map and map[fl] != f:
63 63 msg = _('possible case-folding collision for %s') % f
64 64 if self._abort:
65 65 raise util.Abort(msg)
66 66 self._ui.warn(_("warning: %s\n") % msg)
67 67 map[fl] = f
68 68
69 69 class pathauditor(object):
70 70 '''ensure that a filesystem path contains no banned components.
71 71 the following properties of a path are checked:
72 72
73 73 - ends with a directory separator
74 74 - under top-level .hg
75 75 - starts at the root of a windows drive
76 76 - contains ".."
77 77 - traverses a symlink (e.g. a/symlink_here/b)
78 78 - inside a nested repository (a callback can be used to approve
79 79 some nested repositories, e.g., subrepositories)
80 80 '''
81 81
82 82 def __init__(self, root, callback=None):
83 83 self.audited = set()
84 84 self.auditeddir = set()
85 85 self.root = root
86 86 self.callback = callback
87 87 if os.path.lexists(root) and not util.checkcase(root):
88 88 self.normcase = util.normcase
89 89 else:
90 90 self.normcase = lambda x: x
91 91
92 92 def __call__(self, path):
93 93 '''Check the relative path.
94 94 path may contain a pattern (e.g. foodir/**.txt)'''
95 95
96 96 path = util.localpath(path)
97 97 normpath = self.normcase(path)
98 98 if normpath in self.audited:
99 99 return
100 100 # AIX ignores "/" at end of path, others raise EISDIR.
101 101 if util.endswithsep(path):
102 102 raise util.Abort(_("path ends in directory separator: %s") % path)
103 103 parts = util.splitpath(path)
104 104 if (os.path.splitdrive(path)[0]
105 105 or parts[0].lower() in ('.hg', '.hg.', '')
106 106 or os.pardir in parts):
107 107 raise util.Abort(_("path contains illegal component: %s") % path)
108 108 if '.hg' in path.lower():
109 109 lparts = [p.lower() for p in parts]
110 110 for p in '.hg', '.hg.':
111 111 if p in lparts[1:]:
112 112 pos = lparts.index(p)
113 113 base = os.path.join(*parts[:pos])
114 114 raise util.Abort(_("path '%s' is inside nested repo %r")
115 115 % (path, base))
116 116
117 117 normparts = util.splitpath(normpath)
118 118 assert len(parts) == len(normparts)
119 119
120 120 parts.pop()
121 121 normparts.pop()
122 122 prefixes = []
123 123 while parts:
124 124 prefix = os.sep.join(parts)
125 125 normprefix = os.sep.join(normparts)
126 126 if normprefix in self.auditeddir:
127 127 break
128 128 curpath = os.path.join(self.root, prefix)
129 129 try:
130 130 st = os.lstat(curpath)
131 131 except OSError, err:
132 132 # EINVAL can be raised as invalid path syntax under win32.
133 133 # They must be ignored so that patterns can still be checked.
134 134 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
135 135 raise
136 136 else:
137 137 if stat.S_ISLNK(st.st_mode):
138 138 raise util.Abort(
139 139 _('path %r traverses symbolic link %r')
140 140 % (path, prefix))
141 141 elif (stat.S_ISDIR(st.st_mode) and
142 142 os.path.isdir(os.path.join(curpath, '.hg'))):
143 143 if not self.callback or not self.callback(curpath):
144 144 raise util.Abort(_("path '%s' is inside nested "
145 145 "repo %r")
146 146 % (path, prefix))
147 147 prefixes.append(normprefix)
148 148 parts.pop()
149 149 normparts.pop()
150 150
151 151 self.audited.add(normpath)
152 152 # only add prefixes to the cache after checking everything: we don't
153 153 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
154 154 self.auditeddir.update(prefixes)
155 155
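# A hedged sketch of pathauditor in action; '/tmp/demo' and the paths
# are hypothetical, and a banned path raises util.Abort.
from mercurial import scmutil, util

audit = scmutil.pathauditor('/tmp/demo')
audit('src/main.py')                    # acceptable relative path
for bad in ('../escape', '.hg/hgrc'):
    try:
        audit(bad)
    except util.Abort, inst:
        print 'rejected %s: %s' % (bad, inst)
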
156 156 class abstractopener(object):
157 157 """Abstract base class; cannot be instantiated"""
158 158
159 159 def __init__(self, *args, **kwargs):
160 160 '''Prevent instantiation; don't call this from subclasses.'''
161 161 raise NotImplementedError('attempted instantiating ' + str(type(self)))
162 162
163 163 def tryread(self, path):
164 164 '''gracefully return an empty string for missing files'''
165 165 try:
166 166 return self.read(path)
167 167 except IOError, inst:
168 168 if inst.errno != errno.ENOENT:
169 169 raise
170 170 return ""
171 171
172 172 def read(self, path):
173 173 fp = self(path, 'rb')
174 174 try:
175 175 return fp.read()
176 176 finally:
177 177 fp.close()
178 178
179 179 def write(self, path, data):
180 180 fp = self(path, 'wb')
181 181 try:
182 182 return fp.write(data)
183 183 finally:
184 184 fp.close()
185 185
186 186 def append(self, path, data):
187 187 fp = self(path, 'ab')
188 188 try:
189 189 return fp.write(data)
190 190 finally:
191 191 fp.close()
192 192
193 def mkdir(self, path=None):
194 return os.mkdir(self.join(path))
195
196 def exists(self, path=None):
197 return os.path.exists(self.join(path))
198
199 def isdir(self, path=None):
200 return os.path.isdir(self.join(path))
201
202 def makedir(self, path=None, notindexed=True):
203 return util.makedir(self.join(path), notindexed)
204
205 def makedirs(self, path=None, mode=None):
206 return util.makedirs(self.join(path), mode)
207
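# These helpers are the point of the parent patch: directory operations
# route through the opener, so callers such as localrepository.__init__
# never build os.path joins themselves. A minimal sketch of the
# resulting API ('/tmp/demo' is a hypothetical, initially absent base;
# audit=False keeps the sketch self-contained).
from mercurial import scmutil

vfs = scmutil.opener('/tmp/demo', audit=False)
if not vfs.isdir():         # no path argument means the base itself
    vfs.makedirs()
vfs.mkdir('store')          # relative to the base
vfs.write('notes.txt', 'hello\n')
print vfs.exists('store'), vfs.read('notes.txt')
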
193 208 class opener(abstractopener):
194 209 '''Open files relative to a base directory
195 210
196 211 This class is used to hide the details of COW semantics and
197 212 remote file access from higher level code.
198 213 '''
199 214 def __init__(self, base, audit=True, expand=False):
200 215 if expand:
201 216 base = os.path.realpath(util.expandpath(base))
202 217 self.base = base
203 218 self._audit = audit
204 219 if audit:
205 220 self.auditor = pathauditor(base)
206 221 else:
207 222 self.auditor = util.always
208 223 self.createmode = None
209 224 self._trustnlink = None
210 225
211 226 @util.propertycache
212 227 def _cansymlink(self):
213 228 return util.checklink(self.base)
214 229
215 230 def _fixfilemode(self, name):
216 231 if self.createmode is None:
217 232 return
218 233 os.chmod(name, self.createmode & 0666)
219 234
220 235 def __call__(self, path, mode="r", text=False, atomictemp=False):
221 236 if self._audit:
222 237 r = util.checkosfilename(path)
223 238 if r:
224 239 raise util.Abort("%s: %r" % (r, path))
225 240 self.auditor(path)
226 241 f = self.join(path)
227 242
228 243 if not text and "b" not in mode:
229 244 mode += "b" # for that other OS
230 245
231 246 nlink = -1
232 247 dirname, basename = os.path.split(f)
233 248 # If basename is empty, then the path is malformed because it points
234 249 # to a directory. Let the posixfile() call below raise IOError.
235 250 if basename and mode not in ('r', 'rb'):
236 251 if atomictemp:
237 252 if not os.path.isdir(dirname):
238 253 util.makedirs(dirname, self.createmode)
239 254 return util.atomictempfile(f, mode, self.createmode)
240 255 try:
241 256 if 'w' in mode:
242 257 util.unlink(f)
243 258 nlink = 0
244 259 else:
245 260 # nlinks() may behave differently for files on Windows
246 261 # shares if the file is open.
247 262 fd = util.posixfile(f)
248 263 nlink = util.nlinks(f)
249 264 if nlink < 1:
250 265 nlink = 2 # force mktempcopy (issue1922)
251 266 fd.close()
252 267 except (OSError, IOError), e:
253 268 if e.errno != errno.ENOENT:
254 269 raise
255 270 nlink = 0
256 271 if not os.path.isdir(dirname):
257 272 util.makedirs(dirname, self.createmode)
258 273 if nlink > 0:
259 274 if self._trustnlink is None:
260 275 self._trustnlink = nlink > 1 or util.checknlink(f)
261 276 if nlink > 1 or not self._trustnlink:
262 277 util.rename(util.mktempcopy(f), f)
263 278 fp = util.posixfile(f, mode)
264 279 if nlink == 0:
265 280 self._fixfilemode(f)
266 281 return fp
267 282
268 283 def symlink(self, src, dst):
269 284 self.auditor(dst)
270 285 linkname = self.join(dst)
271 286 try:
272 287 os.unlink(linkname)
273 288 except OSError:
274 289 pass
275 290
276 291 dirname = os.path.dirname(linkname)
277 292 if not os.path.exists(dirname):
278 293 util.makedirs(dirname, self.createmode)
279 294
280 295 if self._cansymlink:
281 296 try:
282 297 os.symlink(src, linkname)
283 298 except OSError, err:
284 299 raise OSError(err.errno, _('could not symlink to %r: %s') %
285 300 (src, err.strerror), linkname)
286 301 else:
287 302 f = self(dst, "w")
288 303 f.write(src)
289 304 f.close()
290 305 self._fixfilemode(dst)
291 306
292 307 def audit(self, path):
293 308 self.auditor(path)
294 309
295 310 def join(self, path):
296 return os.path.join(self.base, path)
311 if path:
312 return os.path.join(self.base, path)
313 else:
314 return self.base
297 315
298 316 class filteropener(abstractopener):
299 317 '''Wrapper opener for filtering filenames with a function.'''
300 318
301 319 def __init__(self, opener, filter):
302 320 self._filter = filter
303 321 self._orig = opener
304 322
305 323 def __call__(self, path, *args, **kwargs):
306 324 return self._orig(self._filter(path), *args, **kwargs)
307 325
308 326 def canonpath(root, cwd, myname, auditor=None):
309 327 '''return the canonical path of myname, given cwd and root'''
310 328 if util.endswithsep(root):
311 329 rootsep = root
312 330 else:
313 331 rootsep = root + os.sep
314 332 name = myname
315 333 if not os.path.isabs(name):
316 334 name = os.path.join(root, cwd, name)
317 335 name = os.path.normpath(name)
318 336 if auditor is None:
319 337 auditor = pathauditor(root)
320 338 if name != rootsep and name.startswith(rootsep):
321 339 name = name[len(rootsep):]
322 340 auditor(name)
323 341 return util.pconvert(name)
324 342 elif name == root:
325 343 return ''
326 344 else:
327 345 # Determine whether `name' is in the hierarchy at or beneath `root',
328 346 # by iterating name=dirname(name) until that causes no change (can't
329 347 # check name == '/', because that doesn't work on windows). The list
330 348 # `rel' holds the reversed list of components making up the relative
331 349 # file name we want.
332 350 rel = []
333 351 while True:
334 352 try:
335 353 s = util.samefile(name, root)
336 354 except OSError:
337 355 s = False
338 356 if s:
339 357 if not rel:
340 358 # name was actually the same as root (maybe a symlink)
341 359 return ''
342 360 rel.reverse()
343 361 name = os.path.join(*rel)
344 362 auditor(name)
345 363 return util.pconvert(name)
346 364 dirname, basename = os.path.split(name)
347 365 rel.append(basename)
348 366 if dirname == name:
349 367 break
350 368 name = dirname
351 369
352 370 raise util.Abort('%s not under root' % myname)
353 371
354 372 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
355 373 '''yield every hg repository under path, always recursively.
356 374 The recurse flag will only control recursion into repo working dirs'''
357 375 def errhandler(err):
358 376 if err.filename == path:
359 377 raise err
360 378 samestat = getattr(os.path, 'samestat', None)
361 379 if followsym and samestat is not None:
362 380 def adddir(dirlst, dirname):
363 381 match = False
364 382 dirstat = os.stat(dirname)
365 383 for lstdirstat in dirlst:
366 384 if samestat(dirstat, lstdirstat):
367 385 match = True
368 386 break
369 387 if not match:
370 388 dirlst.append(dirstat)
371 389 return not match
372 390 else:
373 391 followsym = False
374 392
375 393 if (seen_dirs is None) and followsym:
376 394 seen_dirs = []
377 395 adddir(seen_dirs, path)
378 396 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
379 397 dirs.sort()
380 398 if '.hg' in dirs:
381 399 yield root # found a repository
382 400 qroot = os.path.join(root, '.hg', 'patches')
383 401 if os.path.isdir(os.path.join(qroot, '.hg')):
384 402 yield qroot # we have a patch queue repo here
385 403 if recurse:
386 404 # avoid recursing inside the .hg directory
387 405 dirs.remove('.hg')
388 406 else:
389 407 dirs[:] = [] # don't descend further
390 408 elif followsym:
391 409 newdirs = []
392 410 for d in dirs:
393 411 fname = os.path.join(root, d)
394 412 if adddir(seen_dirs, fname):
395 413 if os.path.islink(fname):
396 414 for hgname in walkrepos(fname, True, seen_dirs):
397 415 yield hgname
398 416 else:
399 417 newdirs.append(d)
400 418 dirs[:] = newdirs
401 419
402 420 def osrcpath():
403 421 '''return default os-specific hgrc search path'''
404 422 path = systemrcpath()
405 423 path.extend(userrcpath())
406 424 path = [os.path.normpath(f) for f in path]
407 425 return path
408 426
409 427 _rcpath = None
410 428
411 429 def rcpath():
412 430 '''return hgrc search path. if env var HGRCPATH is set, use it.
413 431 for each item in path, if directory, use files ending in .rc,
414 432 else use item.
415 433 make HGRCPATH empty to only look in .hg/hgrc of current repo.
416 434 if no HGRCPATH, use default os-specific path.'''
417 435 global _rcpath
418 436 if _rcpath is None:
419 437 if 'HGRCPATH' in os.environ:
420 438 _rcpath = []
421 439 for p in os.environ['HGRCPATH'].split(os.pathsep):
422 440 if not p:
423 441 continue
424 442 p = util.expandpath(p)
425 443 if os.path.isdir(p):
426 444 for f, kind in osutil.listdir(p):
427 445 if f.endswith('.rc'):
428 446 _rcpath.append(os.path.join(p, f))
429 447 else:
430 448 _rcpath.append(p)
431 449 else:
432 450 _rcpath = osrcpath()
433 451 return _rcpath
434 452
435 453 if os.name != 'nt':
436 454
437 455 def rcfiles(path):
438 456 rcs = [os.path.join(path, 'hgrc')]
439 457 rcdir = os.path.join(path, 'hgrc.d')
440 458 try:
441 459 rcs.extend([os.path.join(rcdir, f)
442 460 for f, kind in osutil.listdir(rcdir)
443 461 if f.endswith(".rc")])
444 462 except OSError:
445 463 pass
446 464 return rcs
447 465
448 466 def systemrcpath():
449 467 path = []
450 468 if sys.platform == 'plan9':
451 469 root = 'lib/mercurial'
452 470 else:
453 471 root = 'etc/mercurial'
454 472 # old mod_python does not set sys.argv
455 473 if len(getattr(sys, 'argv', [])) > 0:
456 474 p = os.path.dirname(os.path.dirname(sys.argv[0]))
457 475 path.extend(rcfiles(os.path.join(p, root)))
458 476 path.extend(rcfiles('/' + root))
459 477 return path
460 478
461 479 def userrcpath():
462 480 if sys.platform == 'plan9':
463 481 return [os.environ['home'] + '/lib/hgrc']
464 482 else:
465 483 return [os.path.expanduser('~/.hgrc')]
466 484
467 485 else:
468 486
469 487 import _winreg
470 488
471 489 def systemrcpath():
472 490 '''return default os-specific hgrc search path'''
473 491 rcpath = []
474 492 filename = util.executablepath()
475 493 # Use mercurial.ini found in directory with hg.exe
476 494 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
477 495 if os.path.isfile(progrc):
478 496 rcpath.append(progrc)
479 497 return rcpath
480 498 # Use hgrc.d found in directory with hg.exe
481 499 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
482 500 if os.path.isdir(progrcd):
483 501 for f, kind in osutil.listdir(progrcd):
484 502 if f.endswith('.rc'):
485 503 rcpath.append(os.path.join(progrcd, f))
486 504 return rcpath
487 505 # else look for a system rcpath in the registry
488 506 value = util.lookupreg('SOFTWARE\\Mercurial', None,
489 507 _winreg.HKEY_LOCAL_MACHINE)
490 508 if not isinstance(value, str) or not value:
491 509 return rcpath
492 510 value = util.localpath(value)
493 511 for p in value.split(os.pathsep):
494 512 if p.lower().endswith('mercurial.ini'):
495 513 rcpath.append(p)
496 514 elif os.path.isdir(p):
497 515 for f, kind in osutil.listdir(p):
498 516 if f.endswith('.rc'):
499 517 rcpath.append(os.path.join(p, f))
500 518 return rcpath
501 519
502 520 def userrcpath():
503 521 '''return os-specific hgrc search path to the user dir'''
504 522 home = os.path.expanduser('~')
505 523 path = [os.path.join(home, 'mercurial.ini'),
506 524 os.path.join(home, '.hgrc')]
507 525 userprofile = os.environ.get('USERPROFILE')
508 526 if userprofile:
509 527 path.append(os.path.join(userprofile, 'mercurial.ini'))
510 528 path.append(os.path.join(userprofile, '.hgrc'))
511 529 return path
512 530
513 531 def revsingle(repo, revspec, default='.'):
514 532 if not revspec:
515 533 return repo[default]
516 534
517 535 l = revrange(repo, [revspec])
518 536 if len(l) < 1:
519 537 raise util.Abort(_('empty revision set'))
520 538 return repo[l[-1]]
521 539
522 540 def revpair(repo, revs):
523 541 if not revs:
524 542 return repo.dirstate.p1(), None
525 543
526 544 l = revrange(repo, revs)
527 545
528 546 if len(l) == 0:
529 547 if revs:
530 548 raise util.Abort(_('empty revision range'))
531 549 return repo.dirstate.p1(), None
532 550
533 551 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
534 552 return repo.lookup(l[0]), None
535 553
536 554 return repo.lookup(l[0]), repo.lookup(l[-1])
537 555
538 556 _revrangesep = ':'
539 557
540 558 def revrange(repo, revs):
541 559 """Return a list of revisions from a list of revision specifications."""
542 560
543 561 def revfix(repo, val, defval):
544 562 if not val and val != 0 and defval is not None:
545 563 return defval
546 564 return repo[val].rev()
547 565
548 566 seen, l = set(), []
549 567 for spec in revs:
550 568 if l and not seen:
551 569 seen = set(l)
552 570 # attempt to parse old-style ranges first to deal with
553 571 # things like old-tag which contain query metacharacters
554 572 try:
555 573 if isinstance(spec, int):
556 574 seen.add(spec)
557 575 l.append(spec)
558 576 continue
559 577
560 578 if _revrangesep in spec:
561 579 start, end = spec.split(_revrangesep, 1)
562 580 start = revfix(repo, start, 0)
563 581 end = revfix(repo, end, len(repo) - 1)
564 582 step = start > end and -1 or 1
565 583 if not seen and not l:
566 584 # by far the most common case: revs = ["-1:0"]
567 585 l = range(start, end + step, step)
568 586 # defer syncing seen until next iteration
569 587 continue
570 588 newrevs = set(xrange(start, end + step, step))
571 589 if seen:
572 590 newrevs.difference_update(seen)
573 591 seen.update(newrevs)
574 592 else:
575 593 seen = newrevs
576 594 l.extend(sorted(newrevs, reverse=start > end))
577 595 continue
578 596 elif spec and spec in repo: # single unquoted rev
579 597 rev = revfix(repo, spec, None)
580 598 if rev in seen:
581 599 continue
582 600 seen.add(rev)
583 601 l.append(rev)
584 602 continue
585 603 except error.RepoLookupError:
586 604 pass
587 605
588 606 # fall through to new-style queries if old-style fails
589 607 m = revset.match(repo.ui, spec)
590 608 dl = [r for r in m(repo, xrange(len(repo))) if r not in seen]
591 609 l.extend(dl)
592 610 seen.update(dl)
593 611
594 612 return l
595 613
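# Standalone sketch of revrange()'s old-style 'start:end' handling:
# both endpoints are optional (empty start defaults to 0, empty end to
# the tip); symbolic revisions are resolved through the repo in the
# real code, so only numeric specs are handled here.
def parse_range(spec, tiprev):
    start, end = spec.split(':', 1)
    start = int(start) if start else 0
    end = int(end) if end else tiprev
    step = -1 if start > end else 1
    return range(start, end + step, step)

print parse_range(':3', 10)         # -> [0, 1, 2, 3]
print parse_range('5:2', 10)        # -> [5, 4, 3, 2]
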
596 614 def expandpats(pats):
597 615 if not util.expandglobs:
598 616 return list(pats)
599 617 ret = []
600 618 for p in pats:
601 619 kind, name = matchmod._patsplit(p, None)
602 620 if kind is None:
603 621 try:
604 622 globbed = glob.glob(name)
605 623 except re.error:
606 624 globbed = [name]
607 625 if globbed:
608 626 ret.extend(globbed)
609 627 continue
610 628 ret.append(p)
611 629 return ret
612 630
613 631 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
614 632 if pats == ("",):
615 633 pats = []
616 634 if not globbed and default == 'relpath':
617 635 pats = expandpats(pats or [])
618 636
619 637 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
620 638 default)
621 639 def badfn(f, msg):
622 640 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
623 641 m.bad = badfn
624 642 return m, pats
625 643
626 644 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
627 645 return matchandpats(ctx, pats, opts, globbed, default)[0]
628 646
629 647 def matchall(repo):
630 648 return matchmod.always(repo.root, repo.getcwd())
631 649
632 650 def matchfiles(repo, files):
633 651 return matchmod.exact(repo.root, repo.getcwd(), files)
634 652
635 653 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
636 654 if dry_run is None:
637 655 dry_run = opts.get('dry_run')
638 656 if similarity is None:
639 657 similarity = float(opts.get('similarity') or 0)
640 658 # we'd use status here, except handling of symlinks and ignore is tricky
641 659 added, unknown, deleted, removed = [], [], [], []
642 660 audit_path = pathauditor(repo.root)
643 661 m = match(repo[None], pats, opts)
644 662 rejected = []
645 663 m.bad = lambda x, y: rejected.append(x)
646 664
647 665 for abs in repo.walk(m):
648 666 target = repo.wjoin(abs)
649 667 good = True
650 668 try:
651 669 audit_path(abs)
652 670 except (OSError, util.Abort):
653 671 good = False
654 672 rel = m.rel(abs)
655 673 exact = m.exact(abs)
656 674 if good and abs not in repo.dirstate:
657 675 unknown.append(abs)
658 676 if repo.ui.verbose or not exact:
659 677 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
660 678 elif (repo.dirstate[abs] != 'r' and
661 679 (not good or not os.path.lexists(target) or
662 680 (os.path.isdir(target) and not os.path.islink(target)))):
663 681 deleted.append(abs)
664 682 if repo.ui.verbose or not exact:
665 683 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
666 684 # for finding renames
667 685 elif repo.dirstate[abs] == 'r':
668 686 removed.append(abs)
669 687 elif repo.dirstate[abs] == 'a':
670 688 added.append(abs)
671 689 copies = {}
672 690 if similarity > 0:
673 691 for old, new, score in similar.findrenames(repo,
674 692 added + unknown, removed + deleted, similarity):
675 693 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
676 694 repo.ui.status(_('recording removal of %s as rename to %s '
677 695 '(%d%% similar)\n') %
678 696 (m.rel(old), m.rel(new), score * 100))
679 697 copies[new] = old
680 698
681 699 if not dry_run:
682 700 wctx = repo[None]
683 701 wlock = repo.wlock()
684 702 try:
685 703 wctx.forget(deleted)
686 704 wctx.add(unknown)
687 705 for new, old in copies.iteritems():
688 706 wctx.copy(old, new)
689 707 finally:
690 708 wlock.release()
691 709
692 710 for f in rejected:
693 711 if f in m.files():
694 712 return 1
695 713 return 0
696 714
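# A minimal sketch of calling addremove() directly (illustrative; the
# 0.5 corresponds to 'hg addremove --similarity 50', since callers are
# expected to pass a ratio between 0 and 1).
def _addremovedemo(repo):
    # add unknown files, forget missing ones, and record renames
    # between forgotten and added files that are at least 50% similar
    return addremove(repo, pats=[], opts={}, similarity=0.5)
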
697 715 def updatedir(ui, repo, patches, similarity=0):
698 716 '''Update dirstate after patch application according to metadata'''
699 717 if not patches:
700 718 return []
701 719 copies = []
702 720 removes = set()
703 721 cfiles = patches.keys()
704 722 cwd = repo.getcwd()
705 723 if cwd:
706 724 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
707 725 for f in patches:
708 726 gp = patches[f]
709 727 if not gp:
710 728 continue
711 729 if gp.op == 'RENAME':
712 730 copies.append((gp.oldpath, gp.path))
713 731 removes.add(gp.oldpath)
714 732 elif gp.op == 'COPY':
715 733 copies.append((gp.oldpath, gp.path))
716 734 elif gp.op == 'DELETE':
717 735 removes.add(gp.path)
718 736
719 737 wctx = repo[None]
720 738 for src, dst in copies:
721 739 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
722 740 if (not similarity) and removes:
723 741 wctx.remove(sorted(removes), True)
724 742
725 743 for f in patches:
726 744 gp = patches[f]
727 745 if gp and gp.mode:
728 746 islink, isexec = gp.mode
729 747 dst = repo.wjoin(gp.path)
730 748 # patch won't create empty files
731 749 if gp.op == 'ADD' and not os.path.lexists(dst):
732 750 flags = (isexec and 'x' or '') + (islink and 'l' or '')
733 751 repo.wwrite(gp.path, '', flags)
734 752 util.setflags(dst, islink, isexec)
735 753 addremove(repo, cfiles, similarity=similarity)
736 754 files = patches.keys()
737 755 files.extend([r for r in removes if r not in files])
738 756 return sorted(files)
739 757
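# A hypothetical sketch (not original): 'patches' is the mapping from
# file name to the patch metadata objects (with 'op', 'oldpath', 'path'
# and 'mode' attributes) built while applying a patch, as the loop
# above expects.
def _updatedirdemo(ui, repo, patches):
    changed = updatedir(ui, repo, patches, similarity=0)
    ui.status('dirstate updated for %d files\n' % len(changed))
    return changed
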
740 758 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
741 759 """Update the dirstate to reflect the intent of copying src to dst. For
742 760 various reasons, this might not end up with dst being marked as copied from src.
743 761 """
744 762 origsrc = repo.dirstate.copied(src) or src
745 763 if dst == origsrc: # copying back a copy?
746 764 if repo.dirstate[dst] not in 'mn' and not dryrun:
747 765 repo.dirstate.normallookup(dst)
748 766 else:
749 767 if repo.dirstate[origsrc] == 'a' and origsrc == src:
750 768 if not ui.quiet:
751 769 ui.warn(_("%s has not been committed yet, so no copy "
752 770 "data will be stored for %s.\n")
753 771 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
754 772 if repo.dirstate[dst] in '?r' and not dryrun:
755 773 wctx.add([dst])
756 774 elif not dryrun:
757 775 wctx.copy(origsrc, dst)
758 776
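# Illustrative sketch: recording a copy touches the dirstate, so real
# callers hold the working-directory lock around it ('a.txt' and
# 'b.txt' are hypothetical file names).
def _dirstatecopydemo(ui, repo):
    wlock = repo.wlock()
    try:
        dirstatecopy(ui, repo, repo[None], 'a.txt', 'b.txt')
    finally:
        wlock.release()
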
759 777 def readrequires(opener, supported):
760 778 '''Reads and parses .hg/requires and checks that all entries found
761 779 are in the list of supported features.'''
762 780 requirements = set(opener.read("requires").splitlines())
763 781 missings = []
764 782 for r in requirements:
765 783 if r not in supported:
766 784 if not r or not r[0].isalnum():
767 785 raise error.RequirementError(_(".hg/requires file is corrupt"))
768 786 missings.append(r)
769 787 missings.sort()
770 788 if missings:
771 789 raise error.RequirementError(
772 790 _("unknown repository format: requires features '%s' (upgrade "
773 791 "Mercurial)") % "', '".join(missings))
774 792 return requirements
775 793
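# A hedged sketch of readrequires() as localrepository uses it: a
# missing .hg/requires simply means an old-format repository, so only
# other IOErrors propagate.
def _readrequiresdemo(repo):
    try:
        return readrequires(repo.opener, repo.supported)
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()
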
776 794 class filecacheentry(object):
777 795 def __init__(self, path):
778 796 self.path = path
779 797 self.cachestat = filecacheentry.stat(self.path)
780 798
781 799 if self.cachestat:
782 800 self._cacheable = self.cachestat.cacheable()
783 801 else:
784 802 # None means we don't know yet
785 803 self._cacheable = None
786 804
787 805 def refresh(self):
788 806 if self.cacheable():
789 807 self.cachestat = filecacheentry.stat(self.path)
790 808
791 809 def cacheable(self):
792 810 if self._cacheable is not None:
793 811 return self._cacheable
794 812
795 813 # we don't know yet, assume it is for now
796 814 return True
797 815
798 816 def changed(self):
799 817 # no point in going further if we can't cache it
800 818 if not self.cacheable():
801 819 return True
802 820
803 821 newstat = filecacheentry.stat(self.path)
804 822
805 823 # we may not know if it's cacheable yet, check again now
806 824 if newstat and self._cacheable is None:
807 825 self._cacheable = newstat.cacheable()
808 826
809 827 # check again
810 828 if not self._cacheable:
811 829 return True
812 830
813 831 if self.cachestat != newstat:
814 832 self.cachestat = newstat
815 833 return True
816 834 else:
817 835 return False
818 836
819 837 @staticmethod
820 838 def stat(path):
821 839 try:
822 840 return util.cachestat(path)
823 841 except OSError, e:
824 842 if e.errno != errno.ENOENT:
825 843 raise
826 844
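# Illustrative only: filecacheentry snapshots a file's stat data, and
# changed() compares it against a fresh stat, so an atomic rename by
# another process is detected on the next check.
def _filecacheentrydemo(path):
    entry = filecacheentry(path)
    # ... the file at 'path' may be replaced by a writer here ...
    if entry.changed():
        pass  # stale: reread the file; changed() stored the new stat
    return entry
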
827 845 class filecache(object):
828 846 '''A property-like decorator that tracks a file under .hg/ for updates.
829 847
830 848 Records stat info in _filecache when first called.
831 849
832 850 On subsequent calls, compares old stat info with new info, and recreates
833 851 the object when needed, updating the new stat info in _filecache.
834 852
835 853 Mercurial either atomically renames or appends to files under .hg,
836 854 so to ensure the cache is reliable we need the filesystem to be able
837 855 to tell us whether a file has been replaced. If it can't, we fall back to
838 856 recreating the object on every call (essentially the same behaviour as
839 857 propertycache).'''
840 858 def __init__(self, path):
841 859 self.path = path
842 860
843 861 def join(self, obj, fname):
844 862 """Used to compute the runtime path of the cached file.
845 863
846 864 Users should subclass filecache and provide their own version of this
847 865 function to call the appropriate join function on 'obj' (an instance
848 866 of the class whose member function was decorated).
849 867 """
850 868 return obj.join(fname)
851 869
852 870 def __call__(self, func):
853 871 self.func = func
854 872 self.name = func.__name__
855 873 return self
856 874
857 875 def __get__(self, obj, type=None):
858 876 # do we need to check if the file changed?
859 877 if self.name in obj.__dict__:
860 878 return obj.__dict__[self.name]
861 879
862 880 entry = obj._filecache.get(self.name)
863 881
864 882 if entry:
865 883 if entry.changed():
866 884 entry.obj = self.func(obj)
867 885 else:
868 886 path = self.join(obj, self.path)
869 887
870 888 # We stat -before- creating the object so our cache doesn't lie if
871 889 # a writer modified the file between the time we read and stat it
872 890 entry = filecacheentry(path)
873 891 entry.obj = self.func(obj)
874 892
875 893 obj._filecache[self.name] = entry
876 894
877 895 obj.__dict__[self.name] = entry.obj
878 896 return entry.obj
879 897
880 898 def __set__(self, obj, value):
881 899 if self.name in obj._filecache:
882 900 obj._filecache[self.name].obj = value # update cached copy
883 901 obj.__dict__[self.name] = value # update copy returned by obj.x
884 902
885 903 def __delete__(self, obj):
886 904 try:
887 905 del obj.__dict__[self.name]
888 906 except KeyError:
889 907 raise AttributeError, self.name
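
# A minimal usage sketch of the decorator above (illustrative; the
# class and its 'bookmarks' file are hypothetical). The decorated
# method is re-run only when the tracked file changes on disk.
class _demorepo(object):
    def __init__(self, path):
        self.path = path
        self._filecache = {}    # populated by filecache.__get__
    def join(self, fname):
        return os.path.join(self.path, fname)
    @filecache('bookmarks')
    def bookmarks(self):
        # reread only when the stat info of '<path>/bookmarks' changes
        return util.readfile(self.join('bookmarks'))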