localrepo: optimize internode status calls using match.always...
Jesse Glick
r16645:9a21fc2c default
@@ -1,2348 +1,2350 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 self._dirtyphases = False
45 45 # A list of callbacks to shape the phase if no data was found.
46 46 # Callbacks are in the form: func(repo, roots) --> processed root.
47 47 # This list is to be filled by extensions during repo setup
48 48 self._phasedefaults = []
49 49
50 50 try:
51 51 self.ui.readconfig(self.join("hgrc"), self.root)
52 52 extensions.loadall(self.ui)
53 53 except IOError:
54 54 pass
55 55
56 56 if not os.path.isdir(self.path):
57 57 if create:
58 58 if not os.path.exists(path):
59 59 util.makedirs(path)
60 60 util.makedir(self.path, notindexed=True)
61 61 requirements = ["revlogv1"]
62 62 if self.ui.configbool('format', 'usestore', True):
63 63 os.mkdir(os.path.join(self.path, "store"))
64 64 requirements.append("store")
65 65 if self.ui.configbool('format', 'usefncache', True):
66 66 requirements.append("fncache")
67 67 if self.ui.configbool('format', 'dotencode', True):
68 68 requirements.append('dotencode')
69 69 # create an invalid changelog
70 70 self.opener.append(
71 71 "00changelog.i",
72 72 '\0\0\0\2' # represents revlogv2
73 73 ' dummy changelog to prevent using the old repo layout'
74 74 )
75 75 if self.ui.configbool('format', 'generaldelta', False):
76 76 requirements.append("generaldelta")
77 77 requirements = set(requirements)
78 78 else:
79 79 raise error.RepoError(_("repository %s not found") % path)
80 80 elif create:
81 81 raise error.RepoError(_("repository %s already exists") % path)
82 82 else:
83 83 try:
84 84 requirements = scmutil.readrequires(self.opener, self.supported)
85 85 except IOError, inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 requirements = set()
89 89
90 90 self.sharedpath = self.path
91 91 try:
92 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 93 if not os.path.exists(s):
94 94 raise error.RepoError(
95 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 96 self.sharedpath = s
97 97 except IOError, inst:
98 98 if inst.errno != errno.ENOENT:
99 99 raise
100 100
101 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 102 self.spath = self.store.path
103 103 self.sopener = self.store.opener
104 104 self.sjoin = self.store.join
105 105 self.opener.createmode = self.store.createmode
106 106 self._applyrequirements(requirements)
107 107 if create:
108 108 self._writerequirements()
109 109
110 110
111 111 self._branchcache = None
112 112 self._branchcachetip = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 # A cache for various files under .hg/ that tracks file changes
118 118 # (used by the filecache decorator)
119 119 #
120 120 # Maps a property name to its util.filecacheentry
121 121 self._filecache = {}
122 122
123 123 def _applyrequirements(self, requirements):
124 124 self.requirements = requirements
125 125 openerreqs = set(('revlogv1', 'generaldelta'))
126 126 self.sopener.options = dict((r, 1) for r in requirements
127 127 if r in openerreqs)
128 128
129 129 def _writerequirements(self):
130 130 reqfile = self.opener("requires", "w")
131 131 for r in self.requirements:
132 132 reqfile.write("%s\n" % r)
133 133 reqfile.close()
134 134
135 135 def _checknested(self, path):
136 136 """Determine if path is a legal nested repository."""
137 137 if not path.startswith(self.root):
138 138 return False
139 139 subpath = path[len(self.root) + 1:]
140 140 normsubpath = util.pconvert(subpath)
141 141
142 142 # XXX: Checking against the current working copy is wrong in
143 143 # the sense that it can reject things like
144 144 #
145 145 # $ hg cat -r 10 sub/x.txt
146 146 #
147 147 # if sub/ is no longer a subrepository in the working copy
148 148 # parent revision.
149 149 #
150 150 # However, it can of course also allow things that would have
151 151 # been rejected before, such as the above cat command if sub/
152 152 # is a subrepository now, but was a normal directory before.
153 153 # The old path auditor would have rejected it by mistake, since it
154 154 # panics when it sees sub/.hg/.
155 155 #
156 156 # All in all, checking against the working copy seems sensible
157 157 # since we want to prevent access to nested repositories on
158 158 # the filesystem *now*.
159 159 ctx = self[None]
160 160 parts = util.splitpath(subpath)
161 161 while parts:
162 162 prefix = '/'.join(parts)
163 163 if prefix in ctx.substate:
164 164 if prefix == normsubpath:
165 165 return True
166 166 else:
167 167 sub = ctx.sub(prefix)
168 168 return sub.checknested(subpath[len(prefix) + 1:])
169 169 else:
170 170 parts.pop()
171 171 return False
172 172
173 173 @filecache('bookmarks')
174 174 def _bookmarks(self):
175 175 return bookmarks.read(self)
176 176
177 177 @filecache('bookmarks.current')
178 178 def _bookmarkcurrent(self):
179 179 return bookmarks.readcurrent(self)
180 180
181 181 def _writebookmarks(self, marks):
182 182 bookmarks.write(self)
183 183
184 184 @storecache('phaseroots')
185 185 def _phaseroots(self):
186 186 phaseroots, self._dirtyphases = phases.readroots(
187 187 self, self._phasedefaults)
188 188 return phaseroots
189 189
190 190 @propertycache
191 191 def _phaserev(self):
192 192 cache = [phases.public] * len(self)
193 193 for phase in phases.trackedphases:
194 194 roots = map(self.changelog.rev, self._phaseroots[phase])
195 195 if roots:
196 196 for rev in roots:
197 197 cache[rev] = phase
198 198 for rev in self.changelog.descendants(*roots):
199 199 cache[rev] = phase
200 200 return cache
201 201
202 202 @storecache('00changelog.i')
203 203 def changelog(self):
204 204 c = changelog.changelog(self.sopener)
205 205 if 'HG_PENDING' in os.environ:
206 206 p = os.environ['HG_PENDING']
207 207 if p.startswith(self.root):
208 208 c.readpending('00changelog.i.a')
209 209 return c
210 210
211 211 @storecache('00manifest.i')
212 212 def manifest(self):
213 213 return manifest.manifest(self.sopener)
214 214
215 215 @filecache('dirstate')
216 216 def dirstate(self):
217 217 warned = [0]
218 218 def validate(node):
219 219 try:
220 220 self.changelog.rev(node)
221 221 return node
222 222 except error.LookupError:
223 223 if not warned[0]:
224 224 warned[0] = True
225 225 self.ui.warn(_("warning: ignoring unknown"
226 226 " working parent %s!\n") % short(node))
227 227 return nullid
228 228
229 229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
230 230
231 231 def __getitem__(self, changeid):
232 232 if changeid is None:
233 233 return context.workingctx(self)
234 234 return context.changectx(self, changeid)
235 235
236 236 def __contains__(self, changeid):
237 237 try:
238 238 return bool(self.lookup(changeid))
239 239 except error.RepoLookupError:
240 240 return False
241 241
242 242 def __nonzero__(self):
243 243 return True
244 244
245 245 def __len__(self):
246 246 return len(self.changelog)
247 247
248 248 def __iter__(self):
249 249 for i in xrange(len(self)):
250 250 yield i
251 251
252 252 def revs(self, expr, *args):
253 253 '''Return a list of revisions matching the given revset'''
254 254 expr = revset.formatspec(expr, *args)
255 255 m = revset.match(None, expr)
256 256 return [r for r in m(self, range(len(self)))]
257 257
258 258 def set(self, expr, *args):
259 259 '''
260 260 Yield a context for each matching revision, after doing arg
261 261 replacement via revset.formatspec
262 262 '''
263 263 for r in self.revs(expr, *args):
264 264 yield self[r]
265 265
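# Editorial usage sketch (not part of the module): revs() and set() take a
# revset template whose arguments are escaped by revset.formatspec, so
# callers never hand-quote user input. The revisions below are invented
# for illustration:
#
#   >>> repo.revs('branch(%s) and not secret()', 'default')
#   [0, 1, 4]
#   >>> [ctx.rev() for ctx in repo.set('%ld and merge()', [0, 1, 4])]
#   [4]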
266 266 def url(self):
267 267 return 'file:' + self.root
268 268
269 269 def hook(self, name, throw=False, **args):
270 270 return hook.hook(self.ui, self, name, throw, **args)
271 271
272 272 tag_disallowed = ':\r\n'
273 273
274 274 def _tag(self, names, node, message, local, user, date, extra={}):
275 275 if isinstance(names, str):
276 276 allchars = names
277 277 names = (names,)
278 278 else:
279 279 allchars = ''.join(names)
280 280 for c in self.tag_disallowed:
281 281 if c in allchars:
282 282 raise util.Abort(_('%r cannot be used in a tag name') % c)
283 283
284 284 branches = self.branchmap()
285 285 for name in names:
286 286 self.hook('pretag', throw=True, node=hex(node), tag=name,
287 287 local=local)
288 288 if name in branches:
289 289 self.ui.warn(_("warning: tag %s conflicts with existing"
290 290 " branch name\n") % name)
291 291
292 292 def writetags(fp, names, munge, prevtags):
293 293 fp.seek(0, 2)
294 294 if prevtags and prevtags[-1] != '\n':
295 295 fp.write('\n')
296 296 for name in names:
297 297 m = munge and munge(name) or name
298 298 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
299 299 old = self.tags().get(name, nullid)
300 300 fp.write('%s %s\n' % (hex(old), m))
301 301 fp.write('%s %s\n' % (hex(node), m))
302 302 fp.close()
303 303
304 304 prevtags = ''
305 305 if local:
306 306 try:
307 307 fp = self.opener('localtags', 'r+')
308 308 except IOError:
309 309 fp = self.opener('localtags', 'a')
310 310 else:
311 311 prevtags = fp.read()
312 312
313 313 # local tags are stored in the current charset
314 314 writetags(fp, names, None, prevtags)
315 315 for name in names:
316 316 self.hook('tag', node=hex(node), tag=name, local=local)
317 317 return
318 318
319 319 try:
320 320 fp = self.wfile('.hgtags', 'rb+')
321 321 except IOError, e:
322 322 if e.errno != errno.ENOENT:
323 323 raise
324 324 fp = self.wfile('.hgtags', 'ab')
325 325 else:
326 326 prevtags = fp.read()
327 327
328 328 # committed tags are stored in UTF-8
329 329 writetags(fp, names, encoding.fromlocal, prevtags)
330 330
331 331 fp.close()
332 332
333 333 self.invalidatecaches()
334 334
335 335 if '.hgtags' not in self.dirstate:
336 336 self[None].add(['.hgtags'])
337 337
338 338 m = matchmod.exact(self.root, '', ['.hgtags'])
339 339 tagnode = self.commit(message, user, date, extra=extra, match=m)
340 340
341 341 for name in names:
342 342 self.hook('tag', node=hex(node), tag=name, local=local)
343 343
344 344 return tagnode
345 345
346 346 def tag(self, names, node, message, local, user, date):
347 347 '''tag a revision with one or more symbolic names.
348 348
349 349 names is a list of strings or, when adding a single tag, names may be a
350 350 string.
351 351
352 352 if local is True, the tags are stored in a per-repository file.
353 353 otherwise, they are stored in the .hgtags file, and a new
354 354 changeset is committed with the change.
355 355
356 356 keyword arguments:
357 357
358 358 local: whether to store tags in non-version-controlled file
359 359 (default False)
360 360
361 361 message: commit message to use if committing
362 362
363 363 user: name of user to use if committing
364 364
365 365 date: date tuple to use if committing'''
366 366
367 367 if not local:
368 368 for x in self.status()[:5]:
369 369 if '.hgtags' in x:
370 370 raise util.Abort(_('working copy of .hgtags is changed '
371 371 '(please commit .hgtags manually)'))
372 372
373 373 self.tags() # instantiate the cache
374 374 self._tag(names, node, message, local, user, date)
375 375
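# Editorial usage sketch (names invented): create a local-only tag for the
# working directory parent; nothing is committed because local is True:
#
#   node = repo['.'].node()
#   repo.tag(['v1.2-rc'], node, 'tag release candidate', True,
#            'editor <editor@example.com>', None)
#
# With local False the same call rewrites .hgtags and commits the change.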
376 376 @propertycache
377 377 def _tagscache(self):
378 378 '''Returns a tagscache object that contains various tags-related caches.'''
379 379
380 380 # This simplifies its cache management by having one decorated
381 381 # function (this one) and the rest simply fetch things from it.
382 382 class tagscache(object):
383 383 def __init__(self):
384 384 # These two define the set of tags for this repository. tags
385 385 # maps tag name to node; tagtypes maps tag name to 'global' or
386 386 # 'local'. (Global tags are defined by .hgtags across all
387 387 # heads, and local tags are defined in .hg/localtags.)
388 388 # They constitute the in-memory cache of tags.
389 389 self.tags = self.tagtypes = None
390 390
391 391 self.nodetagscache = self.tagslist = None
392 392
393 393 cache = tagscache()
394 394 cache.tags, cache.tagtypes = self._findtags()
395 395
396 396 return cache
397 397
398 398 def tags(self):
399 399 '''return a mapping of tag to node'''
400 400 t = {}
401 401 for k, v in self._tagscache.tags.iteritems():
402 402 try:
403 403 # ignore tags to unknown nodes
404 404 self.changelog.rev(v)
405 405 t[k] = v
406 406 except error.LookupError:
407 407 pass
408 408 return t
409 409
410 410 def _findtags(self):
411 411 '''Do the hard work of finding tags. Return a pair of dicts
412 412 (tags, tagtypes) where tags maps tag name to node, and tagtypes
413 413 maps tag name to a string like \'global\' or \'local\'.
414 414 Subclasses or extensions are free to add their own tags, but
415 415 should be aware that the returned dicts will be retained for the
416 416 duration of the localrepo object.'''
417 417
418 418 # XXX what tagtype should subclasses/extensions use? Currently
419 419 # mq and bookmarks add tags, but do not set the tagtype at all.
420 420 # Should each extension invent its own tag type? Should there
421 421 # be one tagtype for all such "virtual" tags? Or is the status
422 422 # quo fine?
423 423
424 424 alltags = {} # map tag name to (node, hist)
425 425 tagtypes = {}
426 426
427 427 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
428 428 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
429 429
430 430 # Build the return dicts. Have to re-encode tag names because
431 431 # the tags module always uses UTF-8 (in order not to lose info
432 432 # writing to the cache), but the rest of Mercurial wants them in
433 433 # local encoding.
434 434 tags = {}
435 435 for (name, (node, hist)) in alltags.iteritems():
436 436 if node != nullid:
437 437 tags[encoding.tolocal(name)] = node
438 438 tags['tip'] = self.changelog.tip()
439 439 tagtypes = dict([(encoding.tolocal(name), value)
440 440 for (name, value) in tagtypes.iteritems()])
441 441 return (tags, tagtypes)
442 442
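# Editorial sketch of the pair _findtags() returns (values invented):
#
#   tags     == {'tip': node3, 'v1.0': node1, 'snapshot': node2}
#   tagtypes == {'v1.0': 'global', 'snapshot': 'local'}
#
# 'tip' is synthesized from the changelog and carries no tagtype.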
443 443 def tagtype(self, tagname):
444 444 '''
445 445 return the type of the given tag. result can be:
446 446
447 447 'local' : a local tag
448 448 'global' : a global tag
449 449 None : tag does not exist
450 450 '''
451 451
452 452 return self._tagscache.tagtypes.get(tagname)
453 453
454 454 def tagslist(self):
455 455 '''return a list of tags ordered by revision'''
456 456 if not self._tagscache.tagslist:
457 457 l = []
458 458 for t, n in self.tags().iteritems():
459 459 r = self.changelog.rev(n)
460 460 l.append((r, t, n))
461 461 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
462 462
463 463 return self._tagscache.tagslist
464 464
465 465 def nodetags(self, node):
466 466 '''return the tags associated with a node'''
467 467 if not self._tagscache.nodetagscache:
468 468 nodetagscache = {}
469 469 for t, n in self._tagscache.tags.iteritems():
470 470 nodetagscache.setdefault(n, []).append(t)
471 471 for tags in nodetagscache.itervalues():
472 472 tags.sort()
473 473 self._tagscache.nodetagscache = nodetagscache
474 474 return self._tagscache.nodetagscache.get(node, [])
475 475
476 476 def nodebookmarks(self, node):
477 477 marks = []
478 478 for bookmark, n in self._bookmarks.iteritems():
479 479 if n == node:
480 480 marks.append(bookmark)
481 481 return sorted(marks)
482 482
483 483 def _branchtags(self, partial, lrev):
484 484 # TODO: rename this function?
485 485 tiprev = len(self) - 1
486 486 if lrev != tiprev:
487 487 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
488 488 self._updatebranchcache(partial, ctxgen)
489 489 self._writebranchcache(partial, self.changelog.tip(), tiprev)
490 490
491 491 return partial
492 492
493 493 def updatebranchcache(self):
494 494 tip = self.changelog.tip()
495 495 if self._branchcache is not None and self._branchcachetip == tip:
496 496 return
497 497
498 498 oldtip = self._branchcachetip
499 499 self._branchcachetip = tip
500 500 if oldtip is None or oldtip not in self.changelog.nodemap:
501 501 partial, last, lrev = self._readbranchcache()
502 502 else:
503 503 lrev = self.changelog.rev(oldtip)
504 504 partial = self._branchcache
505 505
506 506 self._branchtags(partial, lrev)
507 507 # this private cache holds all heads (not just the branch tips)
508 508 self._branchcache = partial
509 509
510 510 def branchmap(self):
511 511 '''returns a dictionary {branch: [branchheads]}'''
512 512 self.updatebranchcache()
513 513 return self._branchcache
514 514
515 515 def branchtags(self):
516 516 '''return a dict where branch names map to the tipmost head of
517 517 the branch; open heads take precedence over closed ones'''
518 518 bt = {}
519 519 for bn, heads in self.branchmap().iteritems():
520 520 tip = heads[-1]
521 521 for h in reversed(heads):
522 522 if 'close' not in self.changelog.read(h)[5]:
523 523 tip = h
524 524 break
525 525 bt[bn] = tip
526 526 return bt
527 527
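# Editorial sketch (nodes invented): given branchmap() returning
# {'default': [n1, n2], 'stable': [n3]} where n2 is a closed head and n1
# an open one, branchtags() yields {'default': n1, 'stable': n3}: the
# tipmost open head wins even when a closed head is newer.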
528 528 def _readbranchcache(self):
529 529 partial = {}
530 530 try:
531 531 f = self.opener("cache/branchheads")
532 532 lines = f.read().split('\n')
533 533 f.close()
534 534 except (IOError, OSError):
535 535 return {}, nullid, nullrev
536 536
537 537 try:
538 538 last, lrev = lines.pop(0).split(" ", 1)
539 539 last, lrev = bin(last), int(lrev)
540 540 if lrev >= len(self) or self[lrev].node() != last:
541 541 # invalidate the cache
542 542 raise ValueError('invalidating branch cache (tip differs)')
543 543 for l in lines:
544 544 if not l:
545 545 continue
546 546 node, label = l.split(" ", 1)
547 547 label = encoding.tolocal(label.strip())
548 548 partial.setdefault(label, []).append(bin(node))
549 549 except KeyboardInterrupt:
550 550 raise
551 551 except Exception, inst:
552 552 if self.ui.debugflag:
553 553 self.ui.warn(str(inst), '\n')
554 554 partial, last, lrev = {}, nullid, nullrev
555 555 return partial, last, lrev
556 556
557 557 def _writebranchcache(self, branches, tip, tiprev):
558 558 try:
559 559 f = self.opener("cache/branchheads", "w", atomictemp=True)
560 560 f.write("%s %s\n" % (hex(tip), tiprev))
561 561 for label, nodes in branches.iteritems():
562 562 for node in nodes:
563 563 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
564 564 f.close()
565 565 except (IOError, OSError):
566 566 pass
567 567
568 568 def _updatebranchcache(self, partial, ctxgen):
569 569 # collect new branch entries
570 570 newbranches = {}
571 571 for c in ctxgen:
572 572 newbranches.setdefault(c.branch(), []).append(c.node())
573 573 # if older branchheads are reachable from new ones, they aren't
574 574 # really branchheads. Note checking parents is insufficient:
575 575 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
576 576 for branch, newnodes in newbranches.iteritems():
577 577 bheads = partial.setdefault(branch, [])
578 578 bheads.extend(newnodes)
579 579 if len(bheads) <= 1:
580 580 continue
581 581 bheads = sorted(bheads, key=lambda x: self[x].rev())
582 582 # starting from tip means fewer passes over reachable
583 583 while newnodes:
584 584 latest = newnodes.pop()
585 585 if latest not in bheads:
586 586 continue
587 587 minbhnode = self[bheads[0]].node()
588 588 reachable = self.changelog.reachable(latest, minbhnode)
589 589 reachable.remove(latest)
590 590 if reachable:
591 591 bheads = [b for b in bheads if b not in reachable]
592 592 partial[branch] = bheads
593 593
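# Editorial worked example of the pruning above: with history
# 1 (branch a) -> 2 (branch b) -> 3 (branch a), suppose partial['a'] is
# [node1] and node3 arrives in newbranches['a']. A parents-only check
# would keep both heads, since node3's parent sits on branch b; the
# reachability walk from node3 down to the lowest recorded head finds
# node1 and discards it, leaving partial['a'] == [node3].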
594 594 def lookup(self, key):
595 595 return self[key].node()
596 596
597 597 def lookupbranch(self, key, remote=None):
598 598 repo = remote or self
599 599 if key in repo.branchmap():
600 600 return key
601 601
602 602 repo = (remote and remote.local()) and remote or self
603 603 return repo[key].branch()
604 604
605 605 def known(self, nodes):
606 606 nm = self.changelog.nodemap
607 607 result = []
608 608 for n in nodes:
609 609 r = nm.get(n)
610 610 resp = not (r is None or self._phaserev[r] >= phases.secret)
611 611 result.append(resp)
612 612 return result
613 613
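# Editorial sketch: known() answers discovery probes while hiding secret
# changesets. Assuming n1 is a public node, n2 a secret one, and n3 not
# present in the repository at all:
#
#   >>> repo.known([n1, n2, n3])
#   [True, False, False]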
614 614 def local(self):
615 615 return self
616 616
617 617 def join(self, f):
618 618 return os.path.join(self.path, f)
619 619
620 620 def wjoin(self, f):
621 621 return os.path.join(self.root, f)
622 622
623 623 def file(self, f):
624 624 if f[0] == '/':
625 625 f = f[1:]
626 626 return filelog.filelog(self.sopener, f)
627 627
628 628 def changectx(self, changeid):
629 629 return self[changeid]
630 630
631 631 def parents(self, changeid=None):
632 632 '''get list of changectxs for parents of changeid'''
633 633 return self[changeid].parents()
634 634
635 635 def setparents(self, p1, p2=nullid):
636 636 copies = self.dirstate.setparents(p1, p2)
637 637 if copies:
638 638 # Adjust copy records, the dirstate cannot do it, it
639 639 # requires access to parents manifests. Preserve them
640 640 # only for entries added to first parent.
641 641 pctx = self[p1]
642 642 for f in copies:
643 643 if f not in pctx and copies[f] in pctx:
644 644 self.dirstate.copy(copies[f], f)
645 645
646 646 def filectx(self, path, changeid=None, fileid=None):
647 647 """changeid can be a changeset revision, node, or tag.
648 648 fileid can be a file revision or node."""
649 649 return context.filectx(self, path, changeid, fileid)
650 650
651 651 def getcwd(self):
652 652 return self.dirstate.getcwd()
653 653
654 654 def pathto(self, f, cwd=None):
655 655 return self.dirstate.pathto(f, cwd)
656 656
657 657 def wfile(self, f, mode='r'):
658 658 return self.wopener(f, mode)
659 659
660 660 def _link(self, f):
661 661 return os.path.islink(self.wjoin(f))
662 662
663 663 def _loadfilter(self, filter):
664 664 if filter not in self.filterpats:
665 665 l = []
666 666 for pat, cmd in self.ui.configitems(filter):
667 667 if cmd == '!':
668 668 continue
669 669 mf = matchmod.match(self.root, '', [pat])
670 670 fn = None
671 671 params = cmd
672 672 for name, filterfn in self._datafilters.iteritems():
673 673 if cmd.startswith(name):
674 674 fn = filterfn
675 675 params = cmd[len(name):].lstrip()
676 676 break
677 677 if not fn:
678 678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 679 # Wrap old filters not supporting keyword arguments
680 680 if not inspect.getargspec(fn)[2]:
681 681 oldfn = fn
682 682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 683 l.append((mf, fn, params))
684 684 self.filterpats[filter] = l
685 685 return self.filterpats[filter]
686 686
687 687 def _filter(self, filterpats, filename, data):
688 688 for mf, fn, cmd in filterpats:
689 689 if mf(filename):
690 690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 692 break
693 693
694 694 return data
695 695
696 696 @propertycache
697 697 def _encodefilterpats(self):
698 698 return self._loadfilter('encode')
699 699
700 700 @propertycache
701 701 def _decodefilterpats(self):
702 702 return self._loadfilter('decode')
703 703
704 704 def adddatafilter(self, name, filter):
705 705 self._datafilters[name] = filter
706 706
707 707 def wread(self, filename):
708 708 if self._link(filename):
709 709 data = os.readlink(self.wjoin(filename))
710 710 else:
711 711 data = self.wopener.read(filename)
712 712 return self._filter(self._encodefilterpats, filename, data)
713 713
714 714 def wwrite(self, filename, data, flags):
715 715 data = self._filter(self._decodefilterpats, filename, data)
716 716 if 'l' in flags:
717 717 self.wopener.symlink(data, filename)
718 718 else:
719 719 self.wopener.write(filename, data)
720 720 if 'x' in flags:
721 721 util.setflags(self.wjoin(filename), False, True)
722 722
723 723 def wwritedata(self, filename, data):
724 724 return self._filter(self._decodefilterpats, filename, data)
725 725
726 726 def transaction(self, desc):
727 727 tr = self._transref and self._transref() or None
728 728 if tr and tr.running():
729 729 return tr.nest()
730 730
731 731 # abort here if the journal already exists
732 732 if os.path.exists(self.sjoin("journal")):
733 733 raise error.RepoError(
734 734 _("abandoned transaction found - run hg recover"))
735 735
736 736 self._writejournal(desc)
737 737 renames = [(x, undoname(x)) for x in self._journalfiles()]
738 738
739 739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 740 self.sjoin("journal"),
741 741 aftertrans(renames),
742 742 self.store.createmode)
743 743 self._transref = weakref.ref(tr)
744 744 return tr
745 745
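# Editorial usage pattern (mirrors commitctx() below): take the store lock
# before opening a transaction, and release in reverse order so an
# exception leaves a journal that 'hg recover' can roll back:
#
#   lock = repo.lock()
#   try:
#       tr = repo.transaction('my-operation')
#       try:
#           ...  # write store data through tr
#           tr.close()
#       finally:
#           tr.release()
#   finally:
#       lock.release()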
746 746 def _journalfiles(self):
747 747 return (self.sjoin('journal'), self.join('journal.dirstate'),
748 748 self.join('journal.branch'), self.join('journal.desc'),
749 749 self.join('journal.bookmarks'),
750 750 self.sjoin('journal.phaseroots'))
751 751
752 752 def undofiles(self):
753 753 return [undoname(x) for x in self._journalfiles()]
754 754
755 755 def _writejournal(self, desc):
756 756 self.opener.write("journal.dirstate",
757 757 self.opener.tryread("dirstate"))
758 758 self.opener.write("journal.branch",
759 759 encoding.fromlocal(self.dirstate.branch()))
760 760 self.opener.write("journal.desc",
761 761 "%d\n%s\n" % (len(self), desc))
762 762 self.opener.write("journal.bookmarks",
763 763 self.opener.tryread("bookmarks"))
764 764 self.sopener.write("journal.phaseroots",
765 765 self.sopener.tryread("phaseroots"))
766 766
767 767 def recover(self):
768 768 lock = self.lock()
769 769 try:
770 770 if os.path.exists(self.sjoin("journal")):
771 771 self.ui.status(_("rolling back interrupted transaction\n"))
772 772 transaction.rollback(self.sopener, self.sjoin("journal"),
773 773 self.ui.warn)
774 774 self.invalidate()
775 775 return True
776 776 else:
777 777 self.ui.warn(_("no interrupted transaction available\n"))
778 778 return False
779 779 finally:
780 780 lock.release()
781 781
782 782 def rollback(self, dryrun=False, force=False):
783 783 wlock = lock = None
784 784 try:
785 785 wlock = self.wlock()
786 786 lock = self.lock()
787 787 if os.path.exists(self.sjoin("undo")):
788 788 return self._rollback(dryrun, force)
789 789 else:
790 790 self.ui.warn(_("no rollback information available\n"))
791 791 return 1
792 792 finally:
793 793 release(lock, wlock)
794 794
795 795 def _rollback(self, dryrun, force):
796 796 ui = self.ui
797 797 try:
798 798 args = self.opener.read('undo.desc').splitlines()
799 799 (oldlen, desc, detail) = (int(args[0]), args[1], None)
800 800 if len(args) >= 3:
801 801 detail = args[2]
802 802 oldtip = oldlen - 1
803 803
804 804 if detail and ui.verbose:
805 805 msg = (_('repository tip rolled back to revision %s'
806 806 ' (undo %s: %s)\n')
807 807 % (oldtip, desc, detail))
808 808 else:
809 809 msg = (_('repository tip rolled back to revision %s'
810 810 ' (undo %s)\n')
811 811 % (oldtip, desc))
812 812 except IOError:
813 813 msg = _('rolling back unknown transaction\n')
814 814 desc = None
815 815
816 816 if not force and self['.'] != self['tip'] and desc == 'commit':
817 817 raise util.Abort(
818 818 _('rollback of last commit while not checked out '
819 819 'may lose data'), hint=_('use -f to force'))
820 820
821 821 ui.status(msg)
822 822 if dryrun:
823 823 return 0
824 824
825 825 parents = self.dirstate.parents()
826 826 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
827 827 if os.path.exists(self.join('undo.bookmarks')):
828 828 util.rename(self.join('undo.bookmarks'),
829 829 self.join('bookmarks'))
830 830 if os.path.exists(self.sjoin('undo.phaseroots')):
831 831 util.rename(self.sjoin('undo.phaseroots'),
832 832 self.sjoin('phaseroots'))
833 833 self.invalidate()
834 834
835 835 parentgone = (parents[0] not in self.changelog.nodemap or
836 836 parents[1] not in self.changelog.nodemap)
837 837 if parentgone:
838 838 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
839 839 try:
840 840 branch = self.opener.read('undo.branch')
841 841 self.dirstate.setbranch(branch)
842 842 except IOError:
843 843 ui.warn(_('named branch could not be reset: '
844 844 'current branch is still \'%s\'\n')
845 845 % self.dirstate.branch())
846 846
847 847 self.dirstate.invalidate()
848 848 parents = tuple([p.rev() for p in self.parents()])
849 849 if len(parents) > 1:
850 850 ui.status(_('working directory now based on '
851 851 'revisions %d and %d\n') % parents)
852 852 else:
853 853 ui.status(_('working directory now based on '
854 854 'revision %d\n') % parents)
855 855 self.destroyed()
856 856 return 0
857 857
858 858 def invalidatecaches(self):
859 859 def delcache(name):
860 860 try:
861 861 delattr(self, name)
862 862 except AttributeError:
863 863 pass
864 864
865 865 delcache('_tagscache')
866 866 delcache('_phaserev')
867 867
868 868 self._branchcache = None # in UTF-8
869 869 self._branchcachetip = None
870 870
871 871 def invalidatedirstate(self):
872 872 '''Invalidates the dirstate, causing the next call to dirstate
873 873 to check if it was modified since the last time it was read,
874 874 rereading it if it has.
875 875
876 876 This differs from dirstate.invalidate() in that it doesn't always
877 877 reread the dirstate. Use dirstate.invalidate() if you want to
878 878 explicitly read the dirstate again (i.e. restoring it to a previous
879 879 known good state).'''
880 880 if 'dirstate' in self.__dict__:
881 881 for k in self.dirstate._filecache:
882 882 try:
883 883 delattr(self.dirstate, k)
884 884 except AttributeError:
885 885 pass
886 886 delattr(self, 'dirstate')
887 887
888 888 def invalidate(self):
889 889 for k in self._filecache:
890 890 # dirstate is invalidated separately in invalidatedirstate()
891 891 if k == 'dirstate':
892 892 continue
893 893
894 894 try:
895 895 delattr(self, k)
896 896 except AttributeError:
897 897 pass
898 898 self.invalidatecaches()
899 899
900 900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 901 try:
902 902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 903 except error.LockHeld, inst:
904 904 if not wait:
905 905 raise
906 906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 907 (desc, inst.locker))
908 908 # default to 600 seconds timeout
909 909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 910 releasefn, desc=desc)
911 911 if acquirefn:
912 912 acquirefn()
913 913 return l
914 914
915 915 def _afterlock(self, callback):
916 916 """add a callback to the current repository lock.
917 917
918 918 The callback will be executed on lock release."""
919 919 l = self._lockref and self._lockref()
920 920 if l:
921 921 l.postrelease.append(callback)
922 922
923 923 def lock(self, wait=True):
924 924 '''Lock the repository store (.hg/store) and return a weak reference
925 925 to the lock. Use this before modifying the store (e.g. committing or
926 926 stripping). If you are opening a transaction, get a lock as well.'''
927 927 l = self._lockref and self._lockref()
928 928 if l is not None and l.held:
929 929 l.lock()
930 930 return l
931 931
932 932 def unlock():
933 933 self.store.write()
934 934 if self._dirtyphases:
935 935 phases.writeroots(self, self._phaseroots)
936 936 self._dirtyphases = False
937 937 for k, ce in self._filecache.items():
938 938 if k == 'dirstate':
939 939 continue
940 940 ce.refresh()
941 941
942 942 l = self._lock(self.sjoin("lock"), wait, unlock,
943 943 self.invalidate, _('repository %s') % self.origroot)
944 944 self._lockref = weakref.ref(l)
945 945 return l
946 946
947 947 def wlock(self, wait=True):
948 948 '''Lock the non-store parts of the repository (everything under
949 949 .hg except .hg/store) and return a weak reference to the lock.
950 950 Use this before modifying files in .hg.'''
951 951 l = self._wlockref and self._wlockref()
952 952 if l is not None and l.held:
953 953 l.lock()
954 954 return l
955 955
956 956 def unlock():
957 957 self.dirstate.write()
958 958 ce = self._filecache.get('dirstate')
959 959 if ce:
960 960 ce.refresh()
961 961
962 962 l = self._lock(self.join("wlock"), wait, unlock,
963 963 self.invalidatedirstate, _('working directory of %s') %
964 964 self.origroot)
965 965 self._wlockref = weakref.ref(l)
966 966 return l
967 967
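# Editorial lock-ordering sketch (as rollback() above does): when both
# locks are needed, acquire wlock first, then lock, and hand both to
# release() so they are dropped in reverse order:
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       ...  # mutate working copy state and the store together
#   finally:
#       release(lock, wlock)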
968 968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
969 969 """
970 970 commit an individual file as part of a larger transaction
971 971 """
972 972
973 973 fname = fctx.path()
974 974 text = fctx.data()
975 975 flog = self.file(fname)
976 976 fparent1 = manifest1.get(fname, nullid)
977 977 fparent2 = fparent2o = manifest2.get(fname, nullid)
978 978
979 979 meta = {}
980 980 copy = fctx.renamed()
981 981 if copy and copy[0] != fname:
982 982 # Mark the new revision of this file as a copy of another
983 983 # file. This copy data will effectively act as a parent
984 984 # of this new revision. If this is a merge, the first
985 985 # parent will be the nullid (meaning "look up the copy data")
986 986 # and the second one will be the other parent. For example:
987 987 #
988 988 # 0 --- 1 --- 3 rev1 changes file foo
989 989 # \ / rev2 renames foo to bar and changes it
990 990 # \- 2 -/ rev3 should have bar with all changes and
991 991 # should record that bar descends from
992 992 # bar in rev2 and foo in rev1
993 993 #
994 994 # this allows this merge to succeed:
995 995 #
996 996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
997 997 # \ / merging rev3 and rev4 should use bar@rev2
998 998 # \- 2 --- 4 as the merge base
999 999 #
1000 1000
1001 1001 cfname = copy[0]
1002 1002 crev = manifest1.get(cfname)
1003 1003 newfparent = fparent2
1004 1004
1005 1005 if manifest2: # branch merge
1006 1006 if fparent2 == nullid or crev is None: # copied on remote side
1007 1007 if cfname in manifest2:
1008 1008 crev = manifest2[cfname]
1009 1009 newfparent = fparent1
1010 1010
1011 1011 # find source in nearest ancestor if we've lost track
1012 1012 if not crev:
1013 1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1014 1014 (fname, cfname))
1015 1015 for ancestor in self[None].ancestors():
1016 1016 if cfname in ancestor:
1017 1017 crev = ancestor[cfname].filenode()
1018 1018 break
1019 1019
1020 1020 if crev:
1021 1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1022 1022 meta["copy"] = cfname
1023 1023 meta["copyrev"] = hex(crev)
1024 1024 fparent1, fparent2 = nullid, newfparent
1025 1025 else:
1026 1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1027 1027 "copied from '%s'!\n") % (fname, cfname))
1028 1028
1029 1029 elif fparent2 != nullid:
1030 1030 # is one parent an ancestor of the other?
1031 1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1032 1032 if fparentancestor == fparent1:
1033 1033 fparent1, fparent2 = fparent2, nullid
1034 1034 elif fparentancestor == fparent2:
1035 1035 fparent2 = nullid
1036 1036
1037 1037 # is the file changed?
1038 1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1039 1039 changelist.append(fname)
1040 1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1041 1041
1042 1042 # are just the flags changed during merge?
1043 1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1044 1044 changelist.append(fname)
1045 1045
1046 1046 return fparent1
1047 1047
1048 1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1049 1049 editor=False, extra={}):
1050 1050 """Add a new revision to current repository.
1051 1051
1052 1052 Revision information is gathered from the working directory,
1053 1053 match can be used to filter the committed files. If editor is
1054 1054 supplied, it is called to get a commit message.
1055 1055 """
1056 1056
1057 1057 def fail(f, msg):
1058 1058 raise util.Abort('%s: %s' % (f, msg))
1059 1059
1060 1060 if not match:
1061 1061 match = matchmod.always(self.root, '')
1062 1062
1063 1063 if not force:
1064 1064 vdirs = []
1065 1065 match.dir = vdirs.append
1066 1066 match.bad = fail
1067 1067
1068 1068 wlock = self.wlock()
1069 1069 try:
1070 1070 wctx = self[None]
1071 1071 merge = len(wctx.parents()) > 1
1072 1072
1073 1073 if (not force and merge and match and
1074 1074 (match.files() or match.anypats())):
1075 1075 raise util.Abort(_('cannot partially commit a merge '
1076 1076 '(do not specify files or patterns)'))
1077 1077
1078 1078 changes = self.status(match=match, clean=force)
1079 1079 if force:
1080 1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1081 1081
1082 1082 # check subrepos
1083 1083 subs = []
1084 1084 commitsubs = set()
1085 1085 newstate = wctx.substate.copy()
1086 1086 # only manage subrepos and .hgsubstate if .hgsub is present
1087 1087 if '.hgsub' in wctx:
1088 1088 # we'll decide whether to track this ourselves, thanks
1089 1089 if '.hgsubstate' in changes[0]:
1090 1090 changes[0].remove('.hgsubstate')
1091 1091 if '.hgsubstate' in changes[2]:
1092 1092 changes[2].remove('.hgsubstate')
1093 1093
1094 1094 # compare current state to last committed state
1095 1095 # build new substate based on last committed state
1096 1096 oldstate = wctx.p1().substate
1097 1097 for s in sorted(newstate.keys()):
1098 1098 if not match(s):
1099 1099 # ignore working copy, use old state if present
1100 1100 if s in oldstate:
1101 1101 newstate[s] = oldstate[s]
1102 1102 continue
1103 1103 if not force:
1104 1104 raise util.Abort(
1105 1105 _("commit with new subrepo %s excluded") % s)
1106 1106 if wctx.sub(s).dirty(True):
1107 1107 if not self.ui.configbool('ui', 'commitsubrepos'):
1108 1108 raise util.Abort(
1109 1109 _("uncommitted changes in subrepo %s") % s,
1110 1110 hint=_("use --subrepos for recursive commit"))
1111 1111 subs.append(s)
1112 1112 commitsubs.add(s)
1113 1113 else:
1114 1114 bs = wctx.sub(s).basestate()
1115 1115 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1116 1116 if oldstate.get(s, (None, None, None))[1] != bs:
1117 1117 subs.append(s)
1118 1118
1119 1119 # check for removed subrepos
1120 1120 for p in wctx.parents():
1121 1121 r = [s for s in p.substate if s not in newstate]
1122 1122 subs += [s for s in r if match(s)]
1123 1123 if subs:
1124 1124 if (not match('.hgsub') and
1125 1125 '.hgsub' in (wctx.modified() + wctx.added())):
1126 1126 raise util.Abort(
1127 1127 _("can't commit subrepos without .hgsub"))
1128 1128 changes[0].insert(0, '.hgsubstate')
1129 1129
1130 1130 elif '.hgsub' in changes[2]:
1131 1131 # clean up .hgsubstate when .hgsub is removed
1132 1132 if ('.hgsubstate' in wctx and
1133 1133 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1134 1134 changes[2].insert(0, '.hgsubstate')
1135 1135
1136 1136 # make sure all explicit patterns are matched
1137 1137 if not force and match.files():
1138 1138 matched = set(changes[0] + changes[1] + changes[2])
1139 1139
1140 1140 for f in match.files():
1141 1141 if f == '.' or f in matched or f in wctx.substate:
1142 1142 continue
1143 1143 if f in changes[3]: # missing
1144 1144 fail(f, _('file not found!'))
1145 1145 if f in vdirs: # visited directory
1146 1146 d = f + '/'
1147 1147 for mf in matched:
1148 1148 if mf.startswith(d):
1149 1149 break
1150 1150 else:
1151 1151 fail(f, _("no match under directory!"))
1152 1152 elif f not in self.dirstate:
1153 1153 fail(f, _("file not tracked!"))
1154 1154
1155 1155 if (not force and not extra.get("close") and not merge
1156 1156 and not (changes[0] or changes[1] or changes[2])
1157 1157 and wctx.branch() == wctx.p1().branch()):
1158 1158 return None
1159 1159
1160 1160 if merge and changes[3]:
1161 1161 raise util.Abort(_("cannot commit merge with missing files"))
1162 1162
1163 1163 ms = mergemod.mergestate(self)
1164 1164 for f in changes[0]:
1165 1165 if f in ms and ms[f] == 'u':
1166 1166 raise util.Abort(_("unresolved merge conflicts "
1167 1167 "(see hg help resolve)"))
1168 1168
1169 1169 cctx = context.workingctx(self, text, user, date, extra, changes)
1170 1170 if editor:
1171 1171 cctx._text = editor(self, cctx, subs)
1172 1172 edited = (text != cctx._text)
1173 1173
1174 1174 # commit subs and write new state
1175 1175 if subs:
1176 1176 for s in sorted(commitsubs):
1177 1177 sub = wctx.sub(s)
1178 1178 self.ui.status(_('committing subrepository %s\n') %
1179 1179 subrepo.subrelpath(sub))
1180 1180 sr = sub.commit(cctx._text, user, date)
1181 1181 newstate[s] = (newstate[s][0], sr)
1182 1182 subrepo.writestate(self, newstate)
1183 1183
1184 1184 # Save commit message in case this transaction gets rolled back
1185 1185 # (e.g. by a pretxncommit hook). Leave the content alone on
1186 1186 # the assumption that the user will use the same editor again.
1187 1187 msgfn = self.savecommitmessage(cctx._text)
1188 1188
1189 1189 p1, p2 = self.dirstate.parents()
1190 1190 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1191 1191 try:
1192 1192 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1193 1193 ret = self.commitctx(cctx, True)
1194 1194 except:
1195 1195 if edited:
1196 1196 self.ui.write(
1197 1197 _('note: commit message saved in %s\n') % msgfn)
1198 1198 raise
1199 1199
1200 1200 # update bookmarks, dirstate and mergestate
1201 1201 bookmarks.update(self, p1, ret)
1202 1202 for f in changes[0] + changes[1]:
1203 1203 self.dirstate.normal(f)
1204 1204 for f in changes[2]:
1205 1205 self.dirstate.drop(f)
1206 1206 self.dirstate.setparents(ret)
1207 1207 ms.reset()
1208 1208 finally:
1209 1209 wlock.release()
1210 1210
1211 1211 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1212 1212 return ret
1213 1213
1214 1214 def commitctx(self, ctx, error=False):
1215 1215 """Add a new revision to current repository.
1216 1216 Revision information is passed via the context argument.
1217 1217 """
1218 1218
1219 1219 tr = lock = None
1220 1220 removed = list(ctx.removed())
1221 1221 p1, p2 = ctx.p1(), ctx.p2()
1222 1222 user = ctx.user()
1223 1223
1224 1224 lock = self.lock()
1225 1225 try:
1226 1226 tr = self.transaction("commit")
1227 1227 trp = weakref.proxy(tr)
1228 1228
1229 1229 if ctx.files():
1230 1230 m1 = p1.manifest().copy()
1231 1231 m2 = p2.manifest()
1232 1232
1233 1233 # check in files
1234 1234 new = {}
1235 1235 changed = []
1236 1236 linkrev = len(self)
1237 1237 for f in sorted(ctx.modified() + ctx.added()):
1238 1238 self.ui.note(f + "\n")
1239 1239 try:
1240 1240 fctx = ctx[f]
1241 1241 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1242 1242 changed)
1243 1243 m1.set(f, fctx.flags())
1244 1244 except OSError, inst:
1245 1245 self.ui.warn(_("trouble committing %s!\n") % f)
1246 1246 raise
1247 1247 except IOError, inst:
1248 1248 errcode = getattr(inst, 'errno', errno.ENOENT)
1249 1249 if error or errcode and errcode != errno.ENOENT:
1250 1250 self.ui.warn(_("trouble committing %s!\n") % f)
1251 1251 raise
1252 1252 else:
1253 1253 removed.append(f)
1254 1254
1255 1255 # update manifest
1256 1256 m1.update(new)
1257 1257 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1258 1258 drop = [f for f in removed if f in m1]
1259 1259 for f in drop:
1260 1260 del m1[f]
1261 1261 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1262 1262 p2.manifestnode(), (new, drop))
1263 1263 files = changed + removed
1264 1264 else:
1265 1265 mn = p1.manifestnode()
1266 1266 files = []
1267 1267
1268 1268 # update changelog
1269 1269 self.changelog.delayupdate()
1270 1270 n = self.changelog.add(mn, files, ctx.description(),
1271 1271 trp, p1.node(), p2.node(),
1272 1272 user, ctx.date(), ctx.extra().copy())
1273 1273 p = lambda: self.changelog.writepending() and self.root or ""
1274 1274 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1275 1275 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1276 1276 parent2=xp2, pending=p)
1277 1277 self.changelog.finalize(trp)
1278 1278 # set the new commit in its proper phase
1279 1279 targetphase = phases.newcommitphase(self.ui)
1280 1280 if targetphase:
1281 1281 # retracting the boundary does not alter parent changesets.
1282 1282 # if a parent has a higher phase, the resulting phase will
1283 1283 # be compliant anyway
1284 1284 #
1285 1285 # if the minimal phase was 0 we don't need to retract anything
1286 1286 phases.retractboundary(self, targetphase, [n])
1287 1287 tr.close()
1288 1288 self.updatebranchcache()
1289 1289 return n
1290 1290 finally:
1291 1291 if tr:
1292 1292 tr.release()
1293 1293 lock.release()
1294 1294
1295 1295 def destroyed(self):
1296 1296 '''Inform the repository that nodes have been destroyed.
1297 1297 Intended for use by strip and rollback, so there's a common
1298 1298 place for anything that has to be done after destroying history.'''
1299 1299 # XXX it might be nice if we could take the list of destroyed
1300 1300 # nodes, but I don't see an easy way for rollback() to do that
1301 1301
1302 1302 # Ensure the persistent tag cache is updated. Doing it now
1303 1303 # means that the tag cache only has to worry about destroyed
1304 1304 # heads immediately after a strip/rollback. That in turn
1305 1305 # guarantees that "cachetip == currenttip" (comparing both rev
1306 1306 # and node) always means no nodes have been added or destroyed.
1307 1307
1308 1308 # XXX this is suboptimal when qrefresh'ing: we strip the current
1309 1309 # head, refresh the tag cache, then immediately add a new head.
1310 1310 # But I think doing it this way is necessary for the "instant
1311 1311 # tag cache retrieval" case to work.
1312 1312 self.invalidatecaches()
1313 1313
1314 1314 # Discard all cache entries to force reloading everything.
1315 1315 self._filecache.clear()
1316 1316
1317 1317 def walk(self, match, node=None):
1318 1318 '''
1319 1319 walk recursively through the directory tree or a given
1320 1320 changeset, finding all files matched by the match
1321 1321 function
1322 1322 '''
1323 1323 return self[node].walk(match)
1324 1324
1325 1325 def status(self, node1='.', node2=None, match=None,
1326 1326 ignored=False, clean=False, unknown=False,
1327 1327 listsubrepos=False):
1328 1328 """return status of files between two nodes or node and working directory
1329 1329
1330 1330 If node1 is None, use the first dirstate parent instead.
1331 1331 If node2 is None, compare node1 with working directory.
1332 1332 """
1333 1333
1334 1334 def mfmatches(ctx):
1335 1335 mf = ctx.manifest().copy()
1336 if match.always():
1337 return mf
1336 1338 for fn in mf.keys():
1337 1339 if not match(fn):
1338 1340 del mf[fn]
1339 1341 return mf
1340 1342
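# Editorial note on the two lines added above, which are the point of this
# revision: when the matcher matches everything (match.always() is true),
# mfmatches() returns the copied manifest wholesale instead of calling
# match(fn) once per file. For internode status over a large repository
# this skips a per-filename loop on both manifests, e.g.:
#
#   >>> m = matchmod.always(repo.root, '')
#   >>> m.always()
#   True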
1341 1343 if isinstance(node1, context.changectx):
1342 1344 ctx1 = node1
1343 1345 else:
1344 1346 ctx1 = self[node1]
1345 1347 if isinstance(node2, context.changectx):
1346 1348 ctx2 = node2
1347 1349 else:
1348 1350 ctx2 = self[node2]
1349 1351
1350 1352 working = ctx2.rev() is None
1351 1353 parentworking = working and ctx1 == self['.']
1352 1354 match = match or matchmod.always(self.root, self.getcwd())
1353 1355 listignored, listclean, listunknown = ignored, clean, unknown
1354 1356
1355 1357 # load earliest manifest first for caching reasons
1356 1358 if not working and ctx2.rev() < ctx1.rev():
1357 1359 ctx2.manifest()
1358 1360
1359 1361 if not parentworking:
1360 1362 def bad(f, msg):
1361 1363 # 'f' may be a directory pattern from 'match.files()',
1362 1364 # so 'f not in ctx1' is not enough
1363 1365 if f not in ctx1 and f not in ctx1.dirs():
1364 1366 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1365 1367 match.bad = bad
1366 1368
1367 1369 if working: # we need to scan the working dir
1368 1370 subrepos = []
1369 1371 if '.hgsub' in self.dirstate:
1370 1372 subrepos = ctx2.substate.keys()
1371 1373 s = self.dirstate.status(match, subrepos, listignored,
1372 1374 listclean, listunknown)
1373 1375 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1374 1376
1375 1377 # check for any possibly clean files
1376 1378 if parentworking and cmp:
1377 1379 fixup = []
1378 1380 # do a full compare of any files that might have changed
1379 1381 for f in sorted(cmp):
1380 1382 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1381 1383 or ctx1[f].cmp(ctx2[f])):
1382 1384 modified.append(f)
1383 1385 else:
1384 1386 fixup.append(f)
1385 1387
1386 1388 # update dirstate for files that are actually clean
1387 1389 if fixup:
1388 1390 if listclean:
1389 1391 clean += fixup
1390 1392
1391 1393 try:
1392 1394 # updating the dirstate is optional
1393 1395 # so we don't wait on the lock
1394 1396 wlock = self.wlock(False)
1395 1397 try:
1396 1398 for f in fixup:
1397 1399 self.dirstate.normal(f)
1398 1400 finally:
1399 1401 wlock.release()
1400 1402 except error.LockError:
1401 1403 pass
1402 1404
1403 1405 if not parentworking:
1404 1406 mf1 = mfmatches(ctx1)
1405 1407 if working:
1406 1408 # we are comparing working dir against non-parent
1407 1409 # generate a pseudo-manifest for the working dir
1408 1410 mf2 = mfmatches(self['.'])
1409 1411 for f in cmp + modified + added:
1410 1412 mf2[f] = None
1411 1413 mf2.set(f, ctx2.flags(f))
1412 1414 for f in removed:
1413 1415 if f in mf2:
1414 1416 del mf2[f]
1415 1417 else:
1416 1418 # we are comparing two revisions
1417 1419 deleted, unknown, ignored = [], [], []
1418 1420 mf2 = mfmatches(ctx2)
1419 1421
1420 1422 modified, added, clean = [], [], []
1421 1423 for fn in mf2:
1422 1424 if fn in mf1:
1423 1425 if (fn not in deleted and
1424 1426 (mf1.flags(fn) != mf2.flags(fn) or
1425 1427 (mf1[fn] != mf2[fn] and
1426 1428 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1427 1429 modified.append(fn)
1428 1430 elif listclean:
1429 1431 clean.append(fn)
1430 1432 del mf1[fn]
1431 1433 elif fn not in deleted:
1432 1434 added.append(fn)
1433 1435 removed = mf1.keys()
1434 1436
1435 1437 if working and modified and not self.dirstate._checklink:
1436 1438 # Symlink placeholders may get non-symlink-like contents
1437 1439 # via user error or dereferencing by NFS or Samba servers,
1438 1440 # so we filter out any placeholders that don't look like a
1439 1441 # symlink
1440 1442 sane = []
1441 1443 for f in modified:
1442 1444 if ctx2.flags(f) == 'l':
1443 1445 d = ctx2[f].data()
1444 1446 if len(d) >= 1024 or '\n' in d or util.binary(d):
1445 1447 self.ui.debug('ignoring suspect symlink placeholder'
1446 1448 ' "%s"\n' % f)
1447 1449 continue
1448 1450 sane.append(f)
1449 1451 modified = sane
1450 1452
1451 1453 r = modified, added, removed, deleted, unknown, ignored, clean
1452 1454
1453 1455 if listsubrepos:
1454 1456 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1455 1457 if working:
1456 1458 rev2 = None
1457 1459 else:
1458 1460 rev2 = ctx2.substate[subpath][1]
1459 1461 try:
1460 1462 submatch = matchmod.narrowmatcher(subpath, match)
1461 1463 s = sub.status(rev2, match=submatch, ignored=listignored,
1462 1464 clean=listclean, unknown=listunknown,
1463 1465 listsubrepos=True)
1464 1466 for rfiles, sfiles in zip(r, s):
1465 1467 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1466 1468 except error.LookupError:
1467 1469 self.ui.status(_("skipping missing subrepository: %s\n")
1468 1470 % subpath)
1469 1471
1470 1472 for l in r:
1471 1473 l.sort()
1472 1474 return r
1473 1475
1474 1476 def heads(self, start=None):
1475 1477 heads = self.changelog.heads(start)
1476 1478 # sort the output in rev descending order
1477 1479 return sorted(heads, key=self.changelog.rev, reverse=True)
1478 1480
1479 1481 def branchheads(self, branch=None, start=None, closed=False):
1480 1482 '''return a (possibly filtered) list of heads for the given branch
1481 1483
1482 1484 Heads are returned in topological order, from newest to oldest.
1483 1485 If branch is None, use the dirstate branch.
1484 1486 If start is not None, return only heads reachable from start.
1485 1487 If closed is True, return heads that are marked as closed as well.
1486 1488 '''
1487 1489 if branch is None:
1488 1490 branch = self[None].branch()
1489 1491 branches = self.branchmap()
1490 1492 if branch not in branches:
1491 1493 return []
1492 1494 # the cache returns heads ordered lowest to highest
1493 1495 bheads = list(reversed(branches[branch]))
1494 1496 if start is not None:
1495 1497 # filter out the heads that cannot be reached from startrev
1496 1498 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1497 1499 bheads = [h for h in bheads if h in fbheads]
1498 1500 if not closed:
1499 1501 bheads = [h for h in bheads if
1500 1502 ('close' not in self.changelog.read(h)[5])]
1501 1503 return bheads
1502 1504
1503 1505 def branches(self, nodes):
1504 1506 if not nodes:
1505 1507 nodes = [self.changelog.tip()]
1506 1508 b = []
1507 1509 for n in nodes:
1508 1510 t = n
1509 1511 while True:
1510 1512 p = self.changelog.parents(n)
1511 1513 if p[1] != nullid or p[0] == nullid:
1512 1514 b.append((t, n, p[0], p[1]))
1513 1515 break
1514 1516 n = p[0]
1515 1517 return b
1516 1518
1517 1519 def between(self, pairs):
1518 1520 r = []
1519 1521
1520 1522 for top, bottom in pairs:
1521 1523 n, l, i = top, [], 0
1522 1524 f = 1
1523 1525
1524 1526 while n != bottom and n != nullid:
1525 1527 p = self.changelog.parents(n)[0]
1526 1528 if i == f:
1527 1529 l.append(n)
1528 1530 f = f * 2
1529 1531 n = p
1530 1532 i += 1
1531 1533
1532 1534 r.append(l)
1533 1535
1534 1536 return r
1535 1537
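# Editorial note: for each (top, bottom) pair, between() walks first
# parents from top toward bottom and records the nodes at power-of-two
# distances (1, 2, 4, 8, ...). This is the skip list that 'between'
# queries in the pre-getbundle discovery protocol use to bisect a
# linear range of history.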
1536 1538 def pull(self, remote, heads=None, force=False):
1537 1539 lock = self.lock()
1538 1540 try:
1539 1541 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1540 1542 force=force)
1541 1543 common, fetch, rheads = tmp
1542 1544 if not fetch:
1543 1545 self.ui.status(_("no changes found\n"))
1544 1546 added = []
1545 1547 result = 0
1546 1548 else:
1547 1549 if heads is None and list(common) == [nullid]:
1548 1550 self.ui.status(_("requesting all changes\n"))
1549 1551 elif heads is None and remote.capable('changegroupsubset'):
1550 1552 # issue1320, avoid a race if remote changed after discovery
1551 1553 heads = rheads
1552 1554
1553 1555 if remote.capable('getbundle'):
1554 1556 cg = remote.getbundle('pull', common=common,
1555 1557 heads=heads or rheads)
1556 1558 elif heads is None:
1557 1559 cg = remote.changegroup(fetch, 'pull')
1558 1560 elif not remote.capable('changegroupsubset'):
1559 1561 raise util.Abort(_("partial pull cannot be done because "
1560 1562 "other repository doesn't support "
1561 1563 "changegroupsubset."))
1562 1564 else:
1563 1565 cg = remote.changegroupsubset(fetch, heads, 'pull')
1564 1566 clstart = len(self.changelog)
1565 1567 result = self.addchangegroup(cg, 'pull', remote.url())
1566 1568 clend = len(self.changelog)
1567 1569 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1568 1570
1569 1571 # compute target subset
1570 1572 if heads is None:
1571 1573 # We pulled everything possible
1572 1574 # sync on everything common
1573 1575 subset = common + added
1574 1576 else:
1575 1577 # We pulled a specific subset
1576 1578 # sync on this subset
1577 1579 subset = heads
1578 1580
1579 1581 # Get remote phases data from remote
1580 1582 remotephases = remote.listkeys('phases')
1581 1583 publishing = bool(remotephases.get('publishing', False))
1582 1584 if remotephases and not publishing:
1583 1585 # remote is new and non-publishing
1584 1586 pheads, _dr = phases.analyzeremotephases(self, subset,
1585 1587 remotephases)
1586 1588 phases.advanceboundary(self, phases.public, pheads)
1587 1589 phases.advanceboundary(self, phases.draft, subset)
1588 1590 else:
1589 1591 # Remote is old or publishing; all common changesets
1590 1592 # should be seen as public
1591 1593 phases.advanceboundary(self, phases.public, subset)
1592 1594 finally:
1593 1595 lock.release()
1594 1596
1595 1597 return result
1596 1598
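The phase synchronization at the end of pull() boils down to a two-way decision: a phases-aware, non-publishing remote gets its advertised public heads honored and the rest of the pulled subset capped at draft, while an old or publishing remote makes the whole subset public. A hedged sketch of just that decision over a plain node-to-phase dict; advanceboundary here is a toy that, like the real one, only ever moves phases toward public, but ignores ancestors for brevity:

PUBLIC, DRAFT, SECRET = 0, 1, 2

def advanceboundary(phase_of, targetphase, nodes):
    # toy version: the real advanceboundary also walks ancestors
    for n in nodes:
        phase_of[n] = min(phase_of.get(n, DRAFT), targetphase)

def sync_phases_after_pull(phase_of, subset, remotephases, pheads):
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # new, non-publishing remote: only its public heads go public
        advanceboundary(phase_of, PUBLIC, pheads)
        advanceboundary(phase_of, DRAFT, subset)
    else:
        # old or publishing remote: everything common becomes public
        advanceboundary(phase_of, PUBLIC, subset)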
1597 1599 def checkpush(self, force, revs):
1598 1600 """Extensions can override this function if additional checks have
1599 1601 to be performed before pushing, or call it if they override push
1600 1602 command.
1601 1603 """
1602 1604 pass
1603 1605
1604 1606 def push(self, remote, force=False, revs=None, newbranch=False):
1605 1607 '''Push outgoing changesets (limited by revs) from the current
1606 1608 repository to remote. Return an integer:
1607 1609 - None means nothing to push
1608 1610 - 0 means HTTP error
1609 1611 - 1 means we pushed and remote head count is unchanged *or*
1610 1612 we have outgoing changesets but refused to push
1611 1613 - other values as described by addchangegroup()
1612 1614 '''
1613 1615 # there are two ways to push to remote repo:
1614 1616 #
1615 1617 # addchangegroup assumes local user can lock remote
1616 1618 # repo (local filesystem, old ssh servers).
1617 1619 #
1618 1620 # unbundle assumes local user cannot lock remote repo (new ssh
1619 1621 # servers, http servers).
1620 1622
1621 1623 # get local lock as we might write phase data
1622 1624 locallock = self.lock()
1623 1625 try:
1624 1626 self.checkpush(force, revs)
1625 1627 lock = None
1626 1628 unbundle = remote.capable('unbundle')
1627 1629 if not unbundle:
1628 1630 lock = remote.lock()
1629 1631 try:
1630 1632 # discovery
1631 1633 fci = discovery.findcommonincoming
1632 1634 commoninc = fci(self, remote, force=force)
1633 1635 common, inc, remoteheads = commoninc
1634 1636 fco = discovery.findcommonoutgoing
1635 1637 outgoing = fco(self, remote, onlyheads=revs,
1636 1638 commoninc=commoninc, force=force)
1637 1639
1638 1640
1639 1641 if not outgoing.missing:
1640 1642 # nothing to push
1641 1643 scmutil.nochangesfound(self.ui, outgoing.excluded)
1642 1644 ret = None
1643 1645 else:
1644 1646 # something to push
1645 1647 if not force:
1646 1648 discovery.checkheads(self, remote, outgoing,
1647 1649 remoteheads, newbranch,
1648 1650 bool(inc))
1649 1651
1650 1652 # create a changegroup from local
1651 1653 if revs is None and not outgoing.excluded:
1652 1654 # push everything,
1653 1655 # use the fast path, no race possible on push
1654 1656 cg = self._changegroup(outgoing.missing, 'push')
1655 1657 else:
1656 1658 cg = self.getlocalbundle('push', outgoing)
1657 1659
1658 1660 # apply changegroup to remote
1659 1661 if unbundle:
1660 1662 # local repo finds heads on server, finds out what
1661 1663 # revs it must push. once revs transferred, if server
1662 1664 # finds it has different heads (someone else won
1663 1665 # commit/push race), server aborts.
1664 1666 if force:
1665 1667 remoteheads = ['force']
1666 1668 # ssh: return remote's addchangegroup()
1667 1669 # http: return remote's addchangegroup() or 0 for error
1668 1670 ret = remote.unbundle(cg, remoteheads, 'push')
1669 1671 else:
1670 1672 # we return an integer indicating remote head count change
1671 1673 ret = remote.addchangegroup(cg, 'push', self.url())
1672 1674
1673 1675 if ret:
1674 1676 # push succeeded, synchronize target of the push
1675 1677 cheads = outgoing.missingheads
1676 1678 elif revs is None:
1677 1679 # all-out push failed; synchronize on all common
1678 1680 cheads = outgoing.commonheads
1679 1681 else:
1680 1682 # I want cheads = heads(::missingheads and ::commonheads)
1681 1683 # (missingheads is revs with secret changeset filtered out)
1682 1684 #
1683 1685 # This can be expressed as:
1684 1686 # cheads = ( (missingheads and ::commonheads)
1685 1687 # + (commonheads and ::missingheads)
1686 1688 # )
1687 1689 #
1688 1690 # while trying to push we already computed the following:
1689 1691 # common = (::commonheads)
1690 1692 # missing = ((commonheads::missingheads) - commonheads)
1691 1693 #
1692 1694 # We can pick:
1693 1695 # * missingheads part of common (::commonheads)
1694 1696 common = set(outgoing.common)
1695 1697 cheads = [node for node in revs if node in common]
1696 1698 # and
1697 1699 # * commonheads parents on missing
1698 1700 revset = self.set('%ln and parents(roots(%ln))',
1699 1701 outgoing.commonheads,
1700 1702 outgoing.missing)
1701 1703 cheads.extend(c.node() for c in revset)
1702 1704 # even when we don't push, exchanging phase data is useful
1703 1705 remotephases = remote.listkeys('phases')
1704 1706 if not remotephases: # old server or public only repo
1705 1707 phases.advanceboundary(self, phases.public, cheads)
1706 1708 # don't push any phase data as there is nothing to push
1707 1709 else:
1708 1710 ana = phases.analyzeremotephases(self, cheads, remotephases)
1709 1711 pheads, droots = ana
1710 1712 ### Apply remote phase on local
1711 1713 if remotephases.get('publishing', False):
1712 1714 phases.advanceboundary(self, phases.public, cheads)
1713 1715 else: # publish = False
1714 1716 phases.advanceboundary(self, phases.public, pheads)
1715 1717 phases.advanceboundary(self, phases.draft, cheads)
1716 1718 ### Apply local phase on remote
1717 1719
1718 1720 # Get the list of all revs draft on remote but public here.
1719 1721 # XXX Beware that the revset breaks if droots is not strictly
1720 1722 # XXX roots; we may want to ensure it is, but that is costly
1721 1723 outdated = self.set('heads((%ln::%ln) and public())',
1722 1724 droots, cheads)
1723 1725 for newremotehead in outdated:
1724 1726 r = remote.pushkey('phases',
1725 1727 newremotehead.hex(),
1726 1728 str(phases.draft),
1727 1729 str(phases.public))
1728 1730 if not r:
1729 1731 self.ui.warn(_('updating %s to public failed!\n')
1730 1732 % newremotehead)
1731 1733 finally:
1732 1734 if lock is not None:
1733 1735 lock.release()
1734 1736 finally:
1735 1737 locallock.release()
1736 1738
1737 1739 self.ui.debug("checking for updated bookmarks\n")
1738 1740 rb = remote.listkeys('bookmarks')
1739 1741 for k in rb.keys():
1740 1742 if k in self._bookmarks:
1741 1743 nr, nl = rb[k], hex(self._bookmarks[k])
1742 1744 if nr in self:
1743 1745 cr = self[nr]
1744 1746 cl = self[nl]
1745 1747 if cl in cr.descendants():
1746 1748 r = remote.pushkey('bookmarks', k, nr, nl)
1747 1749 if r:
1748 1750 self.ui.status(_("updating bookmark %s\n") % k)
1749 1751 else:
1750 1752 self.ui.warn(_('updating bookmark %s'
1751 1753 ' failed!\n') % k)
1752 1754
1753 1755 return ret
1754 1756
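Because push() overloads one integer with four meanings, call sites are easy to get wrong. An illustrative decoder for the contract in the docstring above (not part of Mercurial):

def describe_push_result(ret):
    # see the push() docstring for the full contract
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'push failed (HTTP error)'
    if ret == 1:
        return 'pushed; remote head count unchanged (or push refused)'
    if ret > 1:
        return 'pushed; %d new remote head(s)' % (ret - 1)
    return 'pushed; %d remote head(s) removed' % (-ret - 1)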
1755 1757 def changegroupinfo(self, nodes, source):
1756 1758 if self.ui.verbose or source == 'bundle':
1757 1759 self.ui.status(_("%d changesets found\n") % len(nodes))
1758 1760 if self.ui.debugflag:
1759 1761 self.ui.debug("list of changesets:\n")
1760 1762 for node in nodes:
1761 1763 self.ui.debug("%s\n" % hex(node))
1762 1764
1763 1765 def changegroupsubset(self, bases, heads, source):
1764 1766 """Compute a changegroup consisting of all the nodes that are
1765 1767 descendants of any of the bases and ancestors of any of the heads.
1766 1768 Return a chunkbuffer object whose read() method will return
1767 1769 successive changegroup chunks.
1768 1770
1769 1771 It is fairly complex as determining which filenodes and which
1770 1772 manifest nodes need to be included for the changeset to be complete
1771 1773 is non-trivial.
1772 1774
1773 1775 Another wrinkle is doing the reverse, figuring out which changeset in
1774 1776 the changegroup a particular filenode or manifestnode belongs to.
1775 1777 """
1776 1778 cl = self.changelog
1777 1779 if not bases:
1778 1780 bases = [nullid]
1779 1781 csets, bases, heads = cl.nodesbetween(bases, heads)
1780 1782 # We assume that all ancestors of bases are known
1781 1783 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1782 1784 return self._changegroupsubset(common, csets, heads, source)
1783 1785
1784 1786 def getlocalbundle(self, source, outgoing):
1785 1787 """Like getbundle, but taking a discovery.outgoing as an argument.
1786 1788
1787 1789 This is only implemented for local repos and reuses potentially
1788 1790 precomputed sets in outgoing."""
1789 1791 if not outgoing.missing:
1790 1792 return None
1791 1793 return self._changegroupsubset(outgoing.common,
1792 1794 outgoing.missing,
1793 1795 outgoing.missingheads,
1794 1796 source)
1795 1797
1796 1798 def getbundle(self, source, heads=None, common=None):
1797 1799 """Like changegroupsubset, but returns the set difference between the
1798 1800 ancestors of heads and the ancestors of common.
1799 1801
1800 1802 If heads is None, use the local heads. If common is None, use [nullid].
1801 1803
1802 1804 The nodes in common might not all be known locally due to the way the
1803 1805 current discovery protocol works.
1804 1806 """
1805 1807 cl = self.changelog
1806 1808 if common:
1807 1809 nm = cl.nodemap
1808 1810 common = [n for n in common if n in nm]
1809 1811 else:
1810 1812 common = [nullid]
1811 1813 if not heads:
1812 1814 heads = cl.heads()
1813 1815 return self.getlocalbundle(source,
1814 1816 discovery.outgoing(cl, common, heads))
1815 1817
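getbundle() above reduces to the set difference ancestors(heads) - ancestors(common). A toy illustration of that difference on a hand-built DAG (the node names and parents mapping are invented for the example):

def ancestors(parents, nodes):
    # nodes plus everything reachable through parent edges
    seen, stack = set(), list(nodes)
    while stack:
        n = stack.pop()
        if n is not None and n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

# linear history a -> b -> c -> d
parents = {'a': [None], 'b': ['a'], 'c': ['b'], 'd': ['c']}
missing = ancestors(parents, ['d']) - ancestors(parents, ['b'])
print(sorted(missing))   # ['c', 'd'] -- what a pull with common=[b] fetches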
1816 1818 def _changegroupsubset(self, commonrevs, csets, heads, source):
1817 1819
1818 1820 cl = self.changelog
1819 1821 mf = self.manifest
1820 1822 mfs = {} # needed manifests
1821 1823 fnodes = {} # needed file nodes
1822 1824 changedfiles = set()
1823 1825 fstate = ['', {}]
1824 1826 count = [0, 0]
1825 1827
1826 1828 # can we go through the fast path?
1827 1829 heads.sort()
1828 1830 if heads == sorted(self.heads()):
1829 1831 return self._changegroup(csets, source)
1830 1832
1831 1833 # slow path
1832 1834 self.hook('preoutgoing', throw=True, source=source)
1833 1835 self.changegroupinfo(csets, source)
1834 1836
1835 1837 # filter any nodes that claim to be part of the known set
1836 1838 def prune(revlog, missing):
1837 1839 rr, rl = revlog.rev, revlog.linkrev
1838 1840 return [n for n in missing
1839 1841 if rl(rr(n)) not in commonrevs]
1840 1842
1841 1843 progress = self.ui.progress
1842 1844 _bundling = _('bundling')
1843 1845 _changesets = _('changesets')
1844 1846 _manifests = _('manifests')
1845 1847 _files = _('files')
1846 1848
1847 1849 def lookup(revlog, x):
1848 1850 if revlog == cl:
1849 1851 c = cl.read(x)
1850 1852 changedfiles.update(c[3])
1851 1853 mfs.setdefault(c[0], x)
1852 1854 count[0] += 1
1853 1855 progress(_bundling, count[0],
1854 1856 unit=_changesets, total=count[1])
1855 1857 return x
1856 1858 elif revlog == mf:
1857 1859 clnode = mfs[x]
1858 1860 mdata = mf.readfast(x)
1859 1861 for f, n in mdata.iteritems():
1860 1862 if f in changedfiles:
1861 1863 fnodes[f].setdefault(n, clnode)
1862 1864 count[0] += 1
1863 1865 progress(_bundling, count[0],
1864 1866 unit=_manifests, total=count[1])
1865 1867 return clnode
1866 1868 else:
1867 1869 progress(_bundling, count[0], item=fstate[0],
1868 1870 unit=_files, total=count[1])
1869 1871 return fstate[1][x]
1870 1872
1871 1873 bundler = changegroup.bundle10(lookup)
1872 1874 reorder = self.ui.config('bundle', 'reorder', 'auto')
1873 1875 if reorder == 'auto':
1874 1876 reorder = None
1875 1877 else:
1876 1878 reorder = util.parsebool(reorder)
1877 1879
1878 1880 def gengroup():
1879 1881 # Create a changenode group generator that will call our functions
1880 1882 # back to lookup the owning changenode and collect information.
1881 1883 count[:] = [0, len(csets)]
1882 1884 for chunk in cl.group(csets, bundler, reorder=reorder):
1883 1885 yield chunk
1884 1886 progress(_bundling, None)
1885 1887
1886 1888 # Create a generator for the manifestnodes that calls our lookup
1887 1889 # and data collection functions back.
1888 1890 for f in changedfiles:
1889 1891 fnodes[f] = {}
1890 1892 count[:] = [0, len(mfs)]
1891 1893 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1892 1894 yield chunk
1893 1895 progress(_bundling, None)
1894 1896
1895 1897 mfs.clear()
1896 1898
1897 1899 # Go through all our files in order sorted by name.
1898 1900 count[:] = [0, len(changedfiles)]
1899 1901 for fname in sorted(changedfiles):
1900 1902 filerevlog = self.file(fname)
1901 1903 if not len(filerevlog):
1902 1904 raise util.Abort(_("empty or missing revlog for %s") % fname)
1903 1905 fstate[0] = fname
1904 1906 fstate[1] = fnodes.pop(fname, {})
1905 1907
1906 1908 nodelist = prune(filerevlog, fstate[1])
1907 1909 if nodelist:
1908 1910 count[0] += 1
1909 1911 yield bundler.fileheader(fname)
1910 1912 for chunk in filerevlog.group(nodelist, bundler, reorder):
1911 1913 yield chunk
1912 1914
1913 1915 # Signal that no more groups are left.
1914 1916 yield bundler.close()
1915 1917 progress(_bundling, None)
1916 1918
1917 1919 if csets:
1918 1920 self.hook('outgoing', node=hex(csets[0]), source=source)
1919 1921
1920 1922 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1921 1923
1922 1924 def changegroup(self, basenodes, source):
1923 1925 # to avoid a race we use changegroupsubset() (issue1320)
1924 1926 return self.changegroupsubset(basenodes, self.heads(), source)
1925 1927
1926 1928 def _changegroup(self, nodes, source):
1927 1929 """Compute the changegroup of all nodes that we have that a recipient
1928 1930 doesn't. Return a chunkbuffer object whose read() method will return
1929 1931 successive changegroup chunks.
1930 1932
1931 1933 This is much easier than the previous function as we can assume that
1932 1934 the recipient has any changenode we aren't sending them.
1933 1935
1934 1936 nodes is the set of nodes to send"""
1935 1937
1936 1938 cl = self.changelog
1937 1939 mf = self.manifest
1938 1940 mfs = {}
1939 1941 changedfiles = set()
1940 1942 fstate = ['']
1941 1943 count = [0, 0]
1942 1944
1943 1945 self.hook('preoutgoing', throw=True, source=source)
1944 1946 self.changegroupinfo(nodes, source)
1945 1947
1946 1948 revset = set([cl.rev(n) for n in nodes])
1947 1949
1948 1950 def gennodelst(log):
1949 1951 ln, llr = log.node, log.linkrev
1950 1952 return [ln(r) for r in log if llr(r) in revset]
1951 1953
1952 1954 progress = self.ui.progress
1953 1955 _bundling = _('bundling')
1954 1956 _changesets = _('changesets')
1955 1957 _manifests = _('manifests')
1956 1958 _files = _('files')
1957 1959
1958 1960 def lookup(revlog, x):
1959 1961 if revlog == cl:
1960 1962 c = cl.read(x)
1961 1963 changedfiles.update(c[3])
1962 1964 mfs.setdefault(c[0], x)
1963 1965 count[0] += 1
1964 1966 progress(_bundling, count[0],
1965 1967 unit=_changesets, total=count[1])
1966 1968 return x
1967 1969 elif revlog == mf:
1968 1970 count[0] += 1
1969 1971 progress(_bundling, count[0],
1970 1972 unit=_manifests, total=count[1])
1971 1973 return cl.node(revlog.linkrev(revlog.rev(x)))
1972 1974 else:
1973 1975 progress(_bundling, count[0], item=fstate[0],
1974 1976 total=count[1], unit=_files)
1975 1977 return cl.node(revlog.linkrev(revlog.rev(x)))
1976 1978
1977 1979 bundler = changegroup.bundle10(lookup)
1978 1980 reorder = self.ui.config('bundle', 'reorder', 'auto')
1979 1981 if reorder == 'auto':
1980 1982 reorder = None
1981 1983 else:
1982 1984 reorder = util.parsebool(reorder)
1983 1985
1984 1986 def gengroup():
1985 1987 '''yield a sequence of changegroup chunks (strings)'''
1986 1988 # construct a list of all changed files
1987 1989
1988 1990 count[:] = [0, len(nodes)]
1989 1991 for chunk in cl.group(nodes, bundler, reorder=reorder):
1990 1992 yield chunk
1991 1993 progress(_bundling, None)
1992 1994
1993 1995 count[:] = [0, len(mfs)]
1994 1996 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1995 1997 yield chunk
1996 1998 progress(_bundling, None)
1997 1999
1998 2000 count[:] = [0, len(changedfiles)]
1999 2001 for fname in sorted(changedfiles):
2000 2002 filerevlog = self.file(fname)
2001 2003 if not len(filerevlog):
2002 2004 raise util.Abort(_("empty or missing revlog for %s") % fname)
2003 2005 fstate[0] = fname
2004 2006 nodelist = gennodelst(filerevlog)
2005 2007 if nodelist:
2006 2008 count[0] += 1
2007 2009 yield bundler.fileheader(fname)
2008 2010 for chunk in filerevlog.group(nodelist, bundler, reorder):
2009 2011 yield chunk
2010 2012 yield bundler.close()
2011 2013 progress(_bundling, None)
2012 2014
2013 2015 if nodes:
2014 2016 self.hook('outgoing', node=hex(nodes[0]), source=source)
2015 2017
2016 2018 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2017 2019
2018 2020 def addchangegroup(self, source, srctype, url, emptyok=False):
2019 2021 """Add the changegroup returned by source.read() to this repo.
2020 2022 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2021 2023 the URL of the repo where this changegroup is coming from.
2022 2024
2023 2025 Return an integer summarizing the change to this repo:
2024 2026 - nothing changed or no source: 0
2025 2027 - more heads than before: 1+added heads (2..n)
2026 2028 - fewer heads than before: -1-removed heads (-2..-n)
2027 2029 - number of heads stays the same: 1
2028 2030 """
2029 2031 def csmap(x):
2030 2032 self.ui.debug("add changeset %s\n" % short(x))
2031 2033 return len(cl)
2032 2034
2033 2035 def revmap(x):
2034 2036 return cl.rev(x)
2035 2037
2036 2038 if not source:
2037 2039 return 0
2038 2040
2039 2041 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2040 2042
2041 2043 changesets = files = revisions = 0
2042 2044 efiles = set()
2043 2045
2044 2046 # write changelog data to temp files so concurrent readers will not see
2045 2047 # an inconsistent view
2046 2048 cl = self.changelog
2047 2049 cl.delayupdate()
2048 2050 oldheads = cl.heads()
2049 2051
2050 2052 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2051 2053 try:
2052 2054 trp = weakref.proxy(tr)
2053 2055 # pull off the changeset group
2054 2056 self.ui.status(_("adding changesets\n"))
2055 2057 clstart = len(cl)
2056 2058 class prog(object):
2057 2059 step = _('changesets')
2058 2060 count = 1
2059 2061 ui = self.ui
2060 2062 total = None
2061 2063 def __call__(self):
2062 2064 self.ui.progress(self.step, self.count, unit=_('chunks'),
2063 2065 total=self.total)
2064 2066 self.count += 1
2065 2067 pr = prog()
2066 2068 source.callback = pr
2067 2069
2068 2070 source.changelogheader()
2069 2071 srccontent = cl.addgroup(source, csmap, trp)
2070 2072 if not (srccontent or emptyok):
2071 2073 raise util.Abort(_("received changelog group is empty"))
2072 2074 clend = len(cl)
2073 2075 changesets = clend - clstart
2074 2076 for c in xrange(clstart, clend):
2075 2077 efiles.update(self[c].files())
2076 2078 efiles = len(efiles)
2077 2079 self.ui.progress(_('changesets'), None)
2078 2080
2079 2081 # pull off the manifest group
2080 2082 self.ui.status(_("adding manifests\n"))
2081 2083 pr.step = _('manifests')
2082 2084 pr.count = 1
2083 2085 pr.total = changesets # manifests <= changesets
2084 2086 # no need to check for empty manifest group here:
2085 2087 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2086 2088 # no new manifest will be created and the manifest group will
2087 2089 # be empty during the pull
2088 2090 source.manifestheader()
2089 2091 self.manifest.addgroup(source, revmap, trp)
2090 2092 self.ui.progress(_('manifests'), None)
2091 2093
2092 2094 needfiles = {}
2093 2095 if self.ui.configbool('server', 'validate', default=False):
2094 2096 # validate incoming csets have their manifests
2095 2097 for cset in xrange(clstart, clend):
2096 2098 mfest = self.changelog.read(self.changelog.node(cset))[0]
2097 2099 mfest = self.manifest.readdelta(mfest)
2098 2100 # store file nodes we must see
2099 2101 for f, n in mfest.iteritems():
2100 2102 needfiles.setdefault(f, set()).add(n)
2101 2103
2102 2104 # process the files
2103 2105 self.ui.status(_("adding file changes\n"))
2104 2106 pr.step = _('files')
2105 2107 pr.count = 1
2106 2108 pr.total = efiles
2107 2109 source.callback = None
2108 2110
2109 2111 while True:
2110 2112 chunkdata = source.filelogheader()
2111 2113 if not chunkdata:
2112 2114 break
2113 2115 f = chunkdata["filename"]
2114 2116 self.ui.debug("adding %s revisions\n" % f)
2115 2117 pr()
2116 2118 fl = self.file(f)
2117 2119 o = len(fl)
2118 2120 if not fl.addgroup(source, revmap, trp):
2119 2121 raise util.Abort(_("received file revlog group is empty"))
2120 2122 revisions += len(fl) - o
2121 2123 files += 1
2122 2124 if f in needfiles:
2123 2125 needs = needfiles[f]
2124 2126 for new in xrange(o, len(fl)):
2125 2127 n = fl.node(new)
2126 2128 if n in needs:
2127 2129 needs.remove(n)
2128 2130 if not needs:
2129 2131 del needfiles[f]
2130 2132 self.ui.progress(_('files'), None)
2131 2133
2132 2134 for f, needs in needfiles.iteritems():
2133 2135 fl = self.file(f)
2134 2136 for n in needs:
2135 2137 try:
2136 2138 fl.rev(n)
2137 2139 except error.LookupError:
2138 2140 raise util.Abort(
2139 2141 _('missing file data for %s:%s - run hg verify') %
2140 2142 (f, hex(n)))
2141 2143
2142 2144 dh = 0
2143 2145 if oldheads:
2144 2146 heads = cl.heads()
2145 2147 dh = len(heads) - len(oldheads)
2146 2148 for h in heads:
2147 2149 if h not in oldheads and 'close' in self[h].extra():
2148 2150 dh -= 1
2149 2151 htext = ""
2150 2152 if dh:
2151 2153 htext = _(" (%+d heads)") % dh
2152 2154
2153 2155 self.ui.status(_("added %d changesets"
2154 2156 " with %d changes to %d files%s\n")
2155 2157 % (changesets, revisions, files, htext))
2156 2158
2157 2159 if changesets > 0:
2158 2160 p = lambda: cl.writepending() and self.root or ""
2159 2161 self.hook('pretxnchangegroup', throw=True,
2160 2162 node=hex(cl.node(clstart)), source=srctype,
2161 2163 url=url, pending=p)
2162 2164
2163 2165 added = [cl.node(r) for r in xrange(clstart, clend)]
2164 2166 publishing = self.ui.configbool('phases', 'publish', True)
2165 2167 if srctype == 'push':
2166 2168 # Old servers cannot push the boundary themselves.
2167 2169 # New servers won't push the boundary if the changeset already
2168 2170 # existed locally as secret
2169 2171 #
2170 2172 # We should not use added here but the list of all changes in
2171 2173 # the bundle
2172 2174 if publishing:
2173 2175 phases.advanceboundary(self, phases.public, srccontent)
2174 2176 else:
2175 2177 phases.advanceboundary(self, phases.draft, srccontent)
2176 2178 phases.retractboundary(self, phases.draft, added)
2177 2179 elif srctype != 'strip':
2178 2180 # publishing only alters behavior during push
2179 2181 #
2180 2182 # strip should not touch boundary at all
2181 2183 phases.retractboundary(self, phases.draft, added)
2182 2184
2183 2185 # make changelog see real files again
2184 2186 cl.finalize(trp)
2185 2187
2186 2188 tr.close()
2187 2189
2188 2190 if changesets > 0:
2189 2191 def runhooks():
2190 2192 # forcefully update the on-disk branch cache
2191 2193 self.ui.debug("updating the branch cache\n")
2192 2194 self.updatebranchcache()
2193 2195 self.hook("changegroup", node=hex(cl.node(clstart)),
2194 2196 source=srctype, url=url)
2195 2197
2196 2198 for n in added:
2197 2199 self.hook("incoming", node=hex(n), source=srctype,
2198 2200 url=url)
2199 2201 self._afterlock(runhooks)
2200 2202
2201 2203 finally:
2202 2204 tr.release()
2203 2205 # never return 0 here:
2204 2206 if dh < 0:
2205 2207 return dh - 1
2206 2208 else:
2207 2209 return dh + 1
2208 2210
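The final lines of addchangegroup() shift the head delta dh away from zero so that 0 can keep meaning "nothing changed". A small sketch of that encoding and its inverse (the helper names are illustrative):

def encode_head_delta(dh):
    # never return 0 on success; see addchangegroup() above
    return dh - 1 if dh < 0 else dh + 1

def decode_head_delta(ret):
    if ret == 0:
        return None      # nothing changed or no source
    return ret + 1 if ret < 0 else ret - 1

assert encode_head_delta(0) == 1       # head count unchanged
assert encode_head_delta(2) == 3       # two heads added
assert encode_head_delta(-1) == -2     # one head removed
assert decode_head_delta(-2) == -1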
2209 2211 def stream_in(self, remote, requirements):
2210 2212 lock = self.lock()
2211 2213 try:
2212 2214 fp = remote.stream_out()
2213 2215 l = fp.readline()
2214 2216 try:
2215 2217 resp = int(l)
2216 2218 except ValueError:
2217 2219 raise error.ResponseError(
2218 2220 _('Unexpected response from remote server:'), l)
2219 2221 if resp == 1:
2220 2222 raise util.Abort(_('operation forbidden by server'))
2221 2223 elif resp == 2:
2222 2224 raise util.Abort(_('locking the remote repository failed'))
2223 2225 elif resp != 0:
2224 2226 raise util.Abort(_('the server sent an unknown error code'))
2225 2227 self.ui.status(_('streaming all changes\n'))
2226 2228 l = fp.readline()
2227 2229 try:
2228 2230 total_files, total_bytes = map(int, l.split(' ', 1))
2229 2231 except (ValueError, TypeError):
2230 2232 raise error.ResponseError(
2231 2233 _('Unexpected response from remote server:'), l)
2232 2234 self.ui.status(_('%d files to transfer, %s of data\n') %
2233 2235 (total_files, util.bytecount(total_bytes)))
2234 2236 start = time.time()
2235 2237 for i in xrange(total_files):
2236 2238 # XXX doesn't support '\n' or '\r' in filenames
2237 2239 l = fp.readline()
2238 2240 try:
2239 2241 name, size = l.split('\0', 1)
2240 2242 size = int(size)
2241 2243 except (ValueError, TypeError):
2242 2244 raise error.ResponseError(
2243 2245 _('Unexpected response from remote server:'), l)
2244 2246 if self.ui.debugflag:
2245 2247 self.ui.debug('adding %s (%s)\n' %
2246 2248 (name, util.bytecount(size)))
2247 2249 # for backwards compat, name was partially encoded
2248 2250 ofp = self.sopener(store.decodedir(name), 'w')
2249 2251 for chunk in util.filechunkiter(fp, limit=size):
2250 2252 ofp.write(chunk)
2251 2253 ofp.close()
2252 2254 elapsed = time.time() - start
2253 2255 if elapsed <= 0:
2254 2256 elapsed = 0.001
2255 2257 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2256 2258 (util.bytecount(total_bytes), elapsed,
2257 2259 util.bytecount(total_bytes / elapsed)))
2258 2260
2259 2261 # new requirements = old non-format requirements + new format-related
2260 2262 # requirements from the streamed-in repository
2261 2263 requirements.update(set(self.requirements) - self.supportedformats)
2262 2264 self._applyrequirements(requirements)
2263 2265 self._writerequirements()
2264 2266
2265 2267 self.invalidate()
2266 2268 return len(self.heads()) + 1
2267 2269 finally:
2268 2270 lock.release()
2269 2271
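Reading stream_in() above, the stream_out wire format is: a numeric status line (0 = ok, 1 = forbidden, 2 = remote lock failed), a "<total_files> <total_bytes>" line, then for each file a "<name>\0<size>" header followed by exactly size raw bytes. A minimal Python 2 client sketch of that framing, matching the module's own idiom (illustrative only; real clients go through the repo classes):

from StringIO import StringIO  # Python 2, like the module above

def read_stream(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('server refused streaming (code %d)' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _ in range(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))   # int() tolerates the trailing \n

demo = StringIO('0\n2 11\ndata/a.i\x006\nabcdefdata/b.i\x005\n12345')
for name, data in read_stream(demo):
    print('%s: %d bytes' % (name, len(data)))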
2270 2272 def clone(self, remote, heads=[], stream=False):
2271 2273 '''clone remote repository.
2272 2274
2273 2275 keyword arguments:
2274 2276 heads: list of revs to clone (forces use of pull)
2275 2277 stream: use streaming clone if possible'''
2276 2278
2277 2279 # now, all clients that can request uncompressed clones can
2278 2280 # read repo formats supported by all servers that can serve
2279 2281 # them.
2280 2282
2281 2283 # if revlog format changes, client will have to check version
2282 2284 # and format flags on "stream" capability, and use
2283 2285 # uncompressed only if compatible.
2284 2286
2285 2287 if not stream:
2286 2288 # if the server explicitly prefers to stream (for fast LANs)
2287 2289 stream = remote.capable('stream-preferred')
2288 2290
2289 2291 if stream and not heads:
2290 2292 # 'stream' means remote revlog format is revlogv1 only
2291 2293 if remote.capable('stream'):
2292 2294 return self.stream_in(remote, set(('revlogv1',)))
2293 2295 # otherwise, 'streamreqs' contains the remote revlog format
2294 2296 streamreqs = remote.capable('streamreqs')
2295 2297 if streamreqs:
2296 2298 streamreqs = set(streamreqs.split(','))
2297 2299 # if we support it, stream in and adjust our requirements
2298 2300 if not streamreqs - self.supportedformats:
2299 2301 return self.stream_in(remote, streamreqs)
2300 2302 return self.pull(remote, heads)
2301 2303
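clone()'s streaming negotiation is a three-step decision: an explicit stream-preferred capability can opt in, a bare stream capability implies the remote is revlogv1-only, and otherwise streamreqs is compared against the locally supported formats. A condensed sketch with capabilities as a plain dict (the names follow the code above; the function itself is illustrative):

def choose_clone_mode(caps, supportedformats, stream, heads):
    if not stream:
        stream = 'stream-preferred' in caps
    if stream and not heads:
        if 'stream' in caps:
            return 'stream', set(['revlogv1'])   # old-style, revlogv1 only
        streamreqs = caps.get('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            if not streamreqs - supportedformats:
                return 'stream', streamreqs
    return 'pull', None

print(choose_clone_mode({'streamreqs': 'revlogv1,generaldelta'},
                        set(['revlogv1', 'generaldelta']), True, []))
# ('stream', set(['revlogv1', 'generaldelta']))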
2302 2304 def pushkey(self, namespace, key, old, new):
2303 2305 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2304 2306 old=old, new=new)
2305 2307 ret = pushkey.push(self, namespace, key, old, new)
2306 2308 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2307 2309 ret=ret)
2308 2310 return ret
2309 2311
2310 2312 def listkeys(self, namespace):
2311 2313 self.hook('prelistkeys', throw=True, namespace=namespace)
2312 2314 values = pushkey.list(self, namespace)
2313 2315 self.hook('listkeys', namespace=namespace, values=values)
2314 2316 return values
2315 2317
2316 2318 def debugwireargs(self, one, two, three=None, four=None, five=None):
2317 2319 '''used to test argument passing over the wire'''
2318 2320 return "%s %s %s %s %s" % (one, two, three, four, five)
2319 2321
2320 2322 def savecommitmessage(self, text):
2321 2323 fp = self.opener('last-message.txt', 'wb')
2322 2324 try:
2323 2325 fp.write(text)
2324 2326 finally:
2325 2327 fp.close()
2326 2328 return self.pathto(fp.name[len(self.root)+1:])
2327 2329
2328 2330 # used to avoid circular references so destructors work
2329 2331 def aftertrans(files):
2330 2332 renamefiles = [tuple(t) for t in files]
2331 2333 def a():
2332 2334 for src, dest in renamefiles:
2333 2335 try:
2334 2336 util.rename(src, dest)
2335 2337 except OSError: # journal file does not yet exist
2336 2338 pass
2337 2339 return a
2338 2340
2339 2341 def undoname(fn):
2340 2342 base, name = os.path.split(fn)
2341 2343 assert name.startswith('journal')
2342 2344 return os.path.join(base, name.replace('journal', 'undo', 1))
2343 2345
2344 2346 def instance(ui, path, create):
2345 2347 return localrepository(ui, util.urllocalpath(path), create)
2346 2348
2347 2349 def islocal(path):
2348 2350 return True
@@ -1,337 +1,341 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import scmutil, util, fileset
10 10 from i18n import _
11 11
12 12 def _expandsets(pats, ctx):
13 13 '''convert set: patterns into a list of files in the given context'''
14 14 fset = set()
15 15 other = []
16 16
17 17 for kind, expr in pats:
18 18 if kind == 'set':
19 19 if not ctx:
20 20 raise util.Abort("fileset expression with no context")
21 21 s = fileset.getfileset(ctx, expr)
22 22 fset.update(s)
23 23 continue
24 24 other.append((kind, expr))
25 25 return fset, other
26 26
27 27 class match(object):
28 28 def __init__(self, root, cwd, patterns, include=[], exclude=[],
29 29 default='glob', exact=False, auditor=None, ctx=None):
30 30 """build an object to match a set of file patterns
31 31
32 32 arguments:
33 33 root - the canonical root of the tree you're matching against
34 34 cwd - the current working directory, if relevant
35 35 patterns - patterns to find
36 36 include - patterns to include
37 37 exclude - patterns to exclude
38 38 default - if a pattern in names has no explicit type, assume this one
39 39 exact - patterns are actually literals
40 40
41 41 a pattern is one of:
42 42 'glob:<glob>' - a glob relative to cwd
43 43 're:<regexp>' - a regular expression
44 44 'path:<path>' - a path relative to canonroot
45 45 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
46 46 'relpath:<path>' - a path relative to cwd
47 47 'relre:<regexp>' - a regexp that needn't match the start of a name
48 48 'set:<fileset>' - a fileset expression
49 49 '<something>' - a pattern of the specified default type
50 50 """
51 51
52 52 self._root = root
53 53 self._cwd = cwd
54 54 self._files = []
55 55 self._anypats = bool(include or exclude)
56 56 self._ctx = ctx
57 57
58 58 if include:
59 59 pats = _normalize(include, 'glob', root, cwd, auditor)
60 60 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
61 61 if exclude:
62 62 pats = _normalize(exclude, 'glob', root, cwd, auditor)
63 63 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
64 64 if exact:
65 65 self._files = patterns
66 66 pm = self.exact
67 67 elif patterns:
68 68 pats = _normalize(patterns, default, root, cwd, auditor)
69 69 self._files = _roots(pats)
70 70 self._anypats = self._anypats or _anypats(pats)
71 71 self.patternspat, pm = _buildmatch(ctx, pats, '$')
72 72
73 73 if patterns or exact:
74 74 if include:
75 75 if exclude:
76 76 m = lambda f: im(f) and not em(f) and pm(f)
77 77 else:
78 78 m = lambda f: im(f) and pm(f)
79 79 else:
80 80 if exclude:
81 81 m = lambda f: not em(f) and pm(f)
82 82 else:
83 83 m = pm
84 84 else:
85 85 if include:
86 86 if exclude:
87 87 m = lambda f: im(f) and not em(f)
88 88 else:
89 89 m = im
90 90 else:
91 91 if exclude:
92 92 m = lambda f: not em(f)
93 93 else:
94 94 m = lambda f: True
95 95
96 96 self.matchfn = m
97 97 self._fmap = set(self._files)
98 98
99 99 def __call__(self, fn):
100 100 return self.matchfn(fn)
101 101 def __iter__(self):
102 102 for f in self._files:
103 103 yield f
104 104 def bad(self, f, msg):
105 105 '''callback for each explicit file that can't be
106 106 found/accessed, with an error message
107 107 '''
108 108 pass
109 109 def dir(self, f):
110 110 pass
111 111 def missing(self, f):
112 112 pass
113 113 def exact(self, f):
114 114 return f in self._fmap
115 115 def rel(self, f):
116 116 return util.pathto(self._root, self._cwd, f)
117 117 def files(self):
118 118 return self._files
119 119 def anypats(self):
120 120 return self._anypats
121 def always(self):
122 return False
121 123
122 124 class exact(match):
123 125 def __init__(self, root, cwd, files):
124 126 match.__init__(self, root, cwd, files, exact = True)
125 127
126 128 class always(match):
127 129 def __init__(self, root, cwd):
128 130 match.__init__(self, root, cwd, [])
131 def always(self):
132 return True
129 133
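These two always() methods are the point of this commit: a matcher can now report that it matches everything, so status-style callers can skip per-file matching outright instead of probing files() or matchfn. A hedged sketch of such a fast path with a duck-typed stand-in matcher (fakematcher and filter_status are invented for the example):

class fakematcher(object):
    # stand-in for match.match; None means 'match everything'
    def __init__(self, allowed=None):
        self.allowed = allowed
    def always(self):
        return self.allowed is None
    def __call__(self, fn):
        return self.allowed is None or fn in self.allowed

def filter_status(files, m):
    if m.always():              # fast path: no filtering needed at all
        return list(files)
    return [f for f in files if m(f)]

files = ['a.txt', 'sub/b.txt']
print(filter_status(files, fakematcher()))            # both files
print(filter_status(files, fakematcher(['a.txt'])))   # ['a.txt']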
130 134 class narrowmatcher(match):
131 135 """Adapt a matcher to work on a subdirectory only.
132 136
133 137 The paths are remapped to remove/insert the path as needed:
134 138
135 139 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
136 140 >>> m2 = narrowmatcher('sub', m1)
137 141 >>> bool(m2('a.txt'))
138 142 False
139 143 >>> bool(m2('b.txt'))
140 144 True
141 145 >>> bool(m2.matchfn('a.txt'))
142 146 False
143 147 >>> bool(m2.matchfn('b.txt'))
144 148 True
145 149 >>> m2.files()
146 150 ['b.txt']
147 151 >>> m2.exact('b.txt')
148 152 True
149 153 >>> m2.rel('b.txt')
150 154 'b.txt'
151 155 >>> def bad(f, msg):
152 156 ... print "%s: %s" % (f, msg)
153 157 >>> m1.bad = bad
154 158 >>> m2.bad('x.txt', 'No such file')
155 159 sub/x.txt: No such file
156 160 """
157 161
158 162 def __init__(self, path, matcher):
159 163 self._root = matcher._root
160 164 self._cwd = matcher._cwd
161 165 self._path = path
162 166 self._matcher = matcher
163 167
164 168 self._files = [f[len(path) + 1:] for f in matcher._files
165 169 if f.startswith(path + "/")]
166 170 self._anypats = matcher._anypats
167 171 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
168 172 self._fmap = set(self._files)
169 173
170 174 def bad(self, f, msg):
171 175 self._matcher.bad(self._path + "/" + f, msg)
172 176
173 177 def patkind(pat):
174 178 return _patsplit(pat, None)[0]
175 179
176 180 def _patsplit(pat, default):
177 181 """Split a string into an optional pattern kind prefix and the
178 182 actual pattern."""
179 183 if ':' in pat:
180 184 kind, val = pat.split(':', 1)
181 185 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
182 186 'listfile', 'listfile0', 'set'):
183 187 return kind, val
184 188 return default, pat
185 189
186 190 def _globre(pat):
187 191 "convert a glob pattern into a regexp"
188 192 i, n = 0, len(pat)
189 193 res = ''
190 194 group = 0
191 195 escape = re.escape
192 196 def peek():
193 197 return i < n and pat[i]
194 198 while i < n:
195 199 c = pat[i]
196 200 i += 1
197 201 if c not in '*?[{},\\':
198 202 res += escape(c)
199 203 elif c == '*':
200 204 if peek() == '*':
201 205 i += 1
202 206 res += '.*'
203 207 else:
204 208 res += '[^/]*'
205 209 elif c == '?':
206 210 res += '.'
207 211 elif c == '[':
208 212 j = i
209 213 if j < n and pat[j] in '!]':
210 214 j += 1
211 215 while j < n and pat[j] != ']':
212 216 j += 1
213 217 if j >= n:
214 218 res += '\\['
215 219 else:
216 220 stuff = pat[i:j].replace('\\','\\\\')
217 221 i = j + 1
218 222 if stuff[0] == '!':
219 223 stuff = '^' + stuff[1:]
220 224 elif stuff[0] == '^':
221 225 stuff = '\\' + stuff
222 226 res = '%s[%s]' % (res, stuff)
223 227 elif c == '{':
224 228 group += 1
225 229 res += '(?:'
226 230 elif c == '}' and group:
227 231 res += ')'
228 232 group -= 1
229 233 elif c == ',' and group:
230 234 res += '|'
231 235 elif c == '\\':
232 236 p = peek()
233 237 if p:
234 238 i += 1
235 239 res += escape(p)
236 240 else:
237 241 res += escape(c)
238 242 else:
239 243 res += escape(c)
240 244 return res
241 245
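_globre() applies the rules visible above: '*' becomes '[^/]*' (it stops at slashes), '**' becomes '.*', '?' becomes '.', '[!...]' classes map '!' to '^', and '{a,b}' alternation becomes '(?:a|b)'. A quick check of those translations against Python's re module; the regex strings are written out by hand to match what the rules produce, rather than calling the private helper:

import re

translations = {
    '*.c':       r'[^/]*\.c',      # '*' does not cross '/'
    'foo/**.c':  r'foo/.*\.c',     # '**' does
    'a?c':       r'a.c',
    '[!ab]x':    r'[^ab]x',
    '{png,jpg}': r'(?:png|jpg)',
}
assert re.match(translations['*.c'] + '$', 'main.c')
assert not re.match(translations['*.c'] + '$', 'src/main.c')
assert re.match(translations['foo/**.c'] + '$', 'foo/src/deep/x.c')
assert re.match(translations['{png,jpg}'] + '$', 'jpg')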
242 246 def _regex(kind, name, tail):
243 247 '''convert a pattern into a regular expression'''
244 248 if not name:
245 249 return ''
246 250 if kind == 're':
247 251 return name
248 252 elif kind == 'path':
249 253 return '^' + re.escape(name) + '(?:/|$)'
250 254 elif kind == 'relglob':
251 255 return '(?:|.*/)' + _globre(name) + tail
252 256 elif kind == 'relpath':
253 257 return re.escape(name) + '(?:/|$)'
254 258 elif kind == 'relre':
255 259 if name.startswith('^'):
256 260 return name
257 261 return '.*' + name
258 262 return _globre(name) + tail
259 263
260 264 def _buildmatch(ctx, pats, tail):
261 265 fset, pats = _expandsets(pats, ctx)
262 266 if not pats:
263 267 return "", fset.__contains__
264 268
265 269 pat, mf = _buildregexmatch(pats, tail)
266 270 if fset:
267 271 return pat, lambda f: f in fset or mf(f)
268 272 return pat, mf
269 273
270 274 def _buildregexmatch(pats, tail):
271 275 """build a matching function from a set of patterns"""
272 276 try:
273 277 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
274 278 if len(pat) > 20000:
275 279 raise OverflowError()
276 280 return pat, re.compile(pat).match
277 281 except OverflowError:
278 282 # We're using a Python with a tiny regex engine and we
279 283 # made it explode, so we'll divide the pattern list in two
280 284 # until it works
281 285 l = len(pats)
282 286 if l < 2:
283 287 raise
284 288 pata, a = _buildregexmatch(pats[:l//2], tail)
285 289 patb, b = _buildregexmatch(pats[l//2:], tail)
286 290 return pat, lambda s: a(s) or b(s)
287 291 except re.error:
288 292 for k, p in pats:
289 293 try:
290 294 re.compile('(?:%s)' % _regex(k, p, tail))
291 295 except re.error:
292 296 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
293 297 raise util.Abort(_("invalid pattern"))
294 298
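The OverflowError handler above works around Python builds whose regex engine rejects very large patterns: the alternation is split in half recursively until every piece compiles, and the resulting matchers are or-ed together. The same divide-and-retry idea in isolation (build_matcher and the size limit are illustrative; the limit check stands in for a compile failure):

import re

def build_matcher(pats, limit=20000):
    pattern = '(?:%s)' % '|'.join(pats)
    if len(pattern) <= limit:
        return re.compile(pattern).match
    if len(pats) < 2:
        raise OverflowError('single pattern too large')
    mid = len(pats) // 2
    a = build_matcher(pats[:mid], limit)
    b = build_matcher(pats[mid:], limit)
    return lambda s: a(s) or b(s)

m = build_matcher(['foo.*', 'bar.*'], limit=10)   # force a split
print(bool(m('barbaz')))   # True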
295 299 def _normalize(names, default, root, cwd, auditor):
296 300 pats = []
297 301 for kind, name in [_patsplit(p, default) for p in names]:
298 302 if kind in ('glob', 'relpath'):
299 303 name = scmutil.canonpath(root, cwd, name, auditor)
300 304 elif kind in ('relglob', 'path'):
301 305 name = util.normpath(name)
302 306 elif kind in ('listfile', 'listfile0'):
303 307 try:
304 308 files = util.readfile(name)
305 309 if kind == 'listfile0':
306 310 files = files.split('\0')
307 311 else:
308 312 files = files.splitlines()
309 313 files = [f for f in files if f]
310 314 except EnvironmentError:
311 315 raise util.Abort(_("unable to read file list (%s)") % name)
312 316 pats += _normalize(files, default, root, cwd, auditor)
313 317 continue
314 318
315 319 pats.append((kind, name))
316 320 return pats
317 321
318 322 def _roots(patterns):
319 323 r = []
320 324 for kind, name in patterns:
321 325 if kind == 'glob': # find the non-glob prefix
322 326 root = []
323 327 for p in name.split('/'):
324 328 if '[' in p or '{' in p or '*' in p or '?' in p:
325 329 break
326 330 root.append(p)
327 331 r.append('/'.join(root) or '.')
328 332 elif kind in ('relpath', 'path'):
329 333 r.append(name or '.')
330 334 elif kind == 'relglob':
331 335 r.append('.')
332 336 return r
333 337
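_roots() tells the walker how deep it must descend for each pattern: the non-glob prefix for globs, the path itself for path/relpath, and '.' for unrooted globs. The glob case in isolation (nonglob_prefix is an invented name):

def nonglob_prefix(name):
    # longest leading run of path components with no glob characters
    root = []
    for p in name.split('/'):
        if '[' in p or '{' in p or '*' in p or '?' in p:
            break
        root.append(p)
    return '/'.join(root) or '.'

print(nonglob_prefix('src/foo*/bar'))   # 'src'
print(nonglob_prefix('*.c'))            # '.'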
334 338 def _anypats(patterns):
335 339 for kind, name in patterns:
336 340 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
337 341 return True