merge with stable
Matt Mackall
r15735:5b384b7f merge default
@@ -1,2219 +1,2220 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40
41 41 try:
42 42 self.ui.readconfig(self.join("hgrc"), self.root)
43 43 extensions.loadall(self.ui)
44 44 except IOError:
45 45 pass
46 46
47 47 if not os.path.isdir(self.path):
48 48 if create:
49 49 if not os.path.exists(path):
50 50 util.makedirs(path)
51 51 util.makedir(self.path, notindexed=True)
52 52 requirements = ["revlogv1"]
53 53 if self.ui.configbool('format', 'usestore', True):
54 54 os.mkdir(os.path.join(self.path, "store"))
55 55 requirements.append("store")
56 56 if self.ui.configbool('format', 'usefncache', True):
57 57 requirements.append("fncache")
58 58 if self.ui.configbool('format', 'dotencode', True):
59 59 requirements.append('dotencode')
60 60 # create an invalid changelog
61 61 self.opener.append(
62 62 "00changelog.i",
63 63 '\0\0\0\2' # represents revlogv2
64 64 ' dummy changelog to prevent using the old repo layout'
65 65 )
66 66 if self.ui.configbool('format', 'generaldelta', False):
67 67 requirements.append("generaldelta")
68 68 requirements = set(requirements)
69 69 else:
70 70 raise error.RepoError(_("repository %s not found") % path)
71 71 elif create:
72 72 raise error.RepoError(_("repository %s already exists") % path)
73 73 else:
74 74 try:
75 75 requirements = scmutil.readrequires(self.opener, self.supported)
76 76 except IOError, inst:
77 77 if inst.errno != errno.ENOENT:
78 78 raise
79 79 requirements = set()
80 80
81 81 self.sharedpath = self.path
82 82 try:
83 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 84 if not os.path.exists(s):
85 85 raise error.RepoError(
86 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 87 self.sharedpath = s
88 88 except IOError, inst:
89 89 if inst.errno != errno.ENOENT:
90 90 raise
91 91
92 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 93 self.spath = self.store.path
94 94 self.sopener = self.store.opener
95 95 self.sjoin = self.store.join
96 96 self.opener.createmode = self.store.createmode
97 97 self._applyrequirements(requirements)
98 98 if create:
99 99 self._writerequirements()
100 100
101 101
102 102 self._branchcache = None
103 103 self._branchcachetip = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108         # A cache for various files under .hg/ that tracks file changes
109 109 # (used by the filecache decorator)
110 110 #
111 111 # Maps a property name to its util.filecacheentry
112 112 self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
126 126 def _checknested(self, path):
127 127 """Determine if path is a legal nested repository."""
128 128 if not path.startswith(self.root):
129 129 return False
130 130 subpath = path[len(self.root) + 1:]
131 normsubpath = util.pconvert(subpath)
131 132
132 133 # XXX: Checking against the current working copy is wrong in
133 134 # the sense that it can reject things like
134 135 #
135 136 # $ hg cat -r 10 sub/x.txt
136 137 #
137 138 # if sub/ is no longer a subrepository in the working copy
138 139 # parent revision.
139 140 #
140 141 # However, it can of course also allow things that would have
141 142 # been rejected before, such as the above cat command if sub/
142 143 # is a subrepository now, but was a normal directory before.
143 144 # The old path auditor would have rejected by mistake since it
144 145 # panics when it sees sub/.hg/.
145 146 #
146 147 # All in all, checking against the working copy seems sensible
147 148 # since we want to prevent access to nested repositories on
148 149 # the filesystem *now*.
149 150 ctx = self[None]
150 151 parts = util.splitpath(subpath)
151 152 while parts:
152 prefix = os.sep.join(parts)
153 prefix = '/'.join(parts)
153 154 if prefix in ctx.substate:
154 if prefix == subpath:
155 if prefix == normsubpath:
155 156 return True
156 157 else:
157 158 sub = ctx.sub(prefix)
158 159 return sub.checknested(subpath[len(prefix) + 1:])
159 160 else:
160 161 parts.pop()
161 162 return False
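        # Why the '/'-normalized path matters in the change above
        # (hypothetical Windows values): with subpath == 'sub\\x.txt', an
        # os.sep-joined prefix would never match the '/'-separated keys in
        # ctx.substate; comparing against normsubpath ('sub/x.txt' via
        # util.pconvert) keeps the check consistent across platforms.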
162 163
163 164 @filecache('bookmarks')
164 165 def _bookmarks(self):
165 166 return bookmarks.read(self)
166 167
167 168 @filecache('bookmarks.current')
168 169 def _bookmarkcurrent(self):
169 170 return bookmarks.readcurrent(self)
170 171
171 172 def _writebookmarks(self, marks):
172 173 bookmarks.write(self)
173 174
174 175 @filecache('phaseroots')
175 176 def _phaseroots(self):
176 177 self._dirtyphases = False
177 178 phaseroots = phases.readroots(self)
178 179 phases.filterunknown(self, phaseroots)
179 180 return phaseroots
180 181
181 182 @propertycache
182 183 def _phaserev(self):
183 184 cache = [0] * len(self)
184 185 for phase in phases.trackedphases:
185 186 roots = map(self.changelog.rev, self._phaseroots[phase])
186 187 if roots:
187 188 for rev in roots:
188 189 cache[rev] = phase
189 190 for rev in self.changelog.descendants(*roots):
190 191 cache[rev] = phase
191 192 return cache
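        # A sketch of the cache built above (revs invented): with a single
        # draft root at rev 4, cache[4] and every descendant of rev 4 hold
        # phase 1 (draft), while all other entries keep the initial 0
        # (public).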
192 193
193 194 @filecache('00changelog.i', True)
194 195 def changelog(self):
195 196 c = changelog.changelog(self.sopener)
196 197 if 'HG_PENDING' in os.environ:
197 198 p = os.environ['HG_PENDING']
198 199 if p.startswith(self.root):
199 200 c.readpending('00changelog.i.a')
200 201 return c
201 202
202 203 @filecache('00manifest.i', True)
203 204 def manifest(self):
204 205 return manifest.manifest(self.sopener)
205 206
206 207 @filecache('dirstate')
207 208 def dirstate(self):
208 209 warned = [0]
209 210 def validate(node):
210 211 try:
211 212 self.changelog.rev(node)
212 213 return node
213 214 except error.LookupError:
214 215 if not warned[0]:
215 216 warned[0] = True
216 217 self.ui.warn(_("warning: ignoring unknown"
217 218 " working parent %s!\n") % short(node))
218 219 return nullid
219 220
220 221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 222
222 223 def __getitem__(self, changeid):
223 224 if changeid is None:
224 225 return context.workingctx(self)
225 226 return context.changectx(self, changeid)
226 227
227 228 def __contains__(self, changeid):
228 229 try:
229 230 return bool(self.lookup(changeid))
230 231 except error.RepoLookupError:
231 232 return False
232 233
233 234 def __nonzero__(self):
234 235 return True
235 236
236 237 def __len__(self):
237 238 return len(self.changelog)
238 239
239 240 def __iter__(self):
240 241 for i in xrange(len(self)):
241 242 yield i
242 243
243 244 def revs(self, expr, *args):
244 245 '''Return a list of revisions matching the given revset'''
245 246 expr = revset.formatspec(expr, *args)
246 247 m = revset.match(None, expr)
247 248 return [r for r in m(self, range(len(self)))]
248 249
249 250 def set(self, expr, *args):
250 251 '''
251 252 Yield a context for each matching revision, after doing arg
252 253 replacement via revset.formatspec
253 254 '''
254 255 for r in self.revs(expr, *args):
255 256 yield self[r]
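        # Hypothetical usage of revs()/set(), assuming the usual
        # revset.formatspec placeholders (%d for a revision number, %ld for
        # a list of them):
        #
        #   rs = repo.revs('%d::%d and not merge()', 0, 10)
        #   for ctx in repo.set('heads(%ld)', rs):
        #       repo.ui.write('%s\n' % ctx.hex())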
256 257
257 258 def url(self):
258 259 return 'file:' + self.root
259 260
260 261 def hook(self, name, throw=False, **args):
261 262 return hook.hook(self.ui, self, name, throw, **args)
262 263
263 264 tag_disallowed = ':\r\n'
264 265
265 266 def _tag(self, names, node, message, local, user, date, extra={}):
266 267 if isinstance(names, str):
267 268 allchars = names
268 269 names = (names,)
269 270 else:
270 271 allchars = ''.join(names)
271 272 for c in self.tag_disallowed:
272 273 if c in allchars:
273 274 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 275
275 276 branches = self.branchmap()
276 277 for name in names:
277 278 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 279 local=local)
279 280 if name in branches:
280 281 self.ui.warn(_("warning: tag %s conflicts with existing"
281 282 " branch name\n") % name)
282 283
283 284 def writetags(fp, names, munge, prevtags):
284 285 fp.seek(0, 2)
285 286 if prevtags and prevtags[-1] != '\n':
286 287 fp.write('\n')
287 288 for name in names:
288 289 m = munge and munge(name) or name
289 290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 291 old = self.tags().get(name, nullid)
291 292 fp.write('%s %s\n' % (hex(old), m))
292 293 fp.write('%s %s\n' % (hex(node), m))
293 294 fp.close()
294 295
295 296 prevtags = ''
296 297 if local:
297 298 try:
298 299 fp = self.opener('localtags', 'r+')
299 300 except IOError:
300 301 fp = self.opener('localtags', 'a')
301 302 else:
302 303 prevtags = fp.read()
303 304
304 305 # local tags are stored in the current charset
305 306 writetags(fp, names, None, prevtags)
306 307 for name in names:
307 308 self.hook('tag', node=hex(node), tag=name, local=local)
308 309 return
309 310
310 311 try:
311 312 fp = self.wfile('.hgtags', 'rb+')
312 313 except IOError, e:
313 314 if e.errno != errno.ENOENT:
314 315 raise
315 316 fp = self.wfile('.hgtags', 'ab')
316 317 else:
317 318 prevtags = fp.read()
318 319
319 320 # committed tags are stored in UTF-8
320 321 writetags(fp, names, encoding.fromlocal, prevtags)
321 322
322 323 fp.close()
323 324
324 325 if '.hgtags' not in self.dirstate:
325 326 self[None].add(['.hgtags'])
326 327
327 328 m = matchmod.exact(self.root, '', ['.hgtags'])
328 329 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 330
330 331 for name in names:
331 332 self.hook('tag', node=hex(node), tag=name, local=local)
332 333
333 334 return tagnode
334 335
335 336 def tag(self, names, node, message, local, user, date):
336 337 '''tag a revision with one or more symbolic names.
337 338
338 339 names is a list of strings or, when adding a single tag, names may be a
339 340 string.
340 341
341 342 if local is True, the tags are stored in a per-repository file.
342 343 otherwise, they are stored in the .hgtags file, and a new
343 344 changeset is committed with the change.
344 345
345 346 keyword arguments:
346 347
347 348 local: whether to store tags in non-version-controlled file
348 349 (default False)
349 350
350 351 message: commit message to use if committing
351 352
352 353 user: name of user to use if committing
353 354
354 355 date: date tuple to use if committing'''
355 356
356 357 if not local:
357 358 for x in self.status()[:5]:
358 359 if '.hgtags' in x:
359 360 raise util.Abort(_('working copy of .hgtags is changed '
360 361 '(please commit .hgtags manually)'))
361 362
362 363 self.tags() # instantiate the cache
363 364 self._tag(names, node, message, local, user, date)
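        # Example call (values invented): create a repository-local tag,
        # written to .hg/localtags instead of committing to .hgtags:
        #
        #   repo.tag(['snapshot'], node, 'tagging snapshot', True,
        #            'user@example.com', None)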
364 365
365 366 @propertycache
366 367 def _tagscache(self):
367 368         '''Returns a tagscache object that contains various tag-related caches.'''
368 369
369 370 # This simplifies its cache management by having one decorated
370 371 # function (this one) and the rest simply fetch things from it.
371 372 class tagscache(object):
372 373 def __init__(self):
373 374 # These two define the set of tags for this repository. tags
374 375 # maps tag name to node; tagtypes maps tag name to 'global' or
375 376 # 'local'. (Global tags are defined by .hgtags across all
376 377 # heads, and local tags are defined in .hg/localtags.)
377 378 # They constitute the in-memory cache of tags.
378 379 self.tags = self.tagtypes = None
379 380
380 381 self.nodetagscache = self.tagslist = None
381 382
382 383 cache = tagscache()
383 384 cache.tags, cache.tagtypes = self._findtags()
384 385
385 386 return cache
386 387
387 388 def tags(self):
388 389 '''return a mapping of tag to node'''
389 390 return self._tagscache.tags
390 391
391 392 def _findtags(self):
392 393 '''Do the hard work of finding tags. Return a pair of dicts
393 394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 395 maps tag name to a string like \'global\' or \'local\'.
395 396 Subclasses or extensions are free to add their own tags, but
396 397 should be aware that the returned dicts will be retained for the
397 398 duration of the localrepo object.'''
398 399
399 400 # XXX what tagtype should subclasses/extensions use? Currently
400 401 # mq and bookmarks add tags, but do not set the tagtype at all.
401 402 # Should each extension invent its own tag type? Should there
402 403 # be one tagtype for all such "virtual" tags? Or is the status
403 404 # quo fine?
404 405
405 406 alltags = {} # map tag name to (node, hist)
406 407 tagtypes = {}
407 408
408 409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 411
411 412 # Build the return dicts. Have to re-encode tag names because
412 413 # the tags module always uses UTF-8 (in order not to lose info
413 414 # writing to the cache), but the rest of Mercurial wants them in
414 415 # local encoding.
415 416 tags = {}
416 417 for (name, (node, hist)) in alltags.iteritems():
417 418 if node != nullid:
418 419 try:
419 420 # ignore tags to unknown nodes
420 421 self.changelog.lookup(node)
421 422 tags[encoding.tolocal(name)] = node
422 423 except error.LookupError:
423 424 pass
424 425 tags['tip'] = self.changelog.tip()
425 426 tagtypes = dict([(encoding.tolocal(name), value)
426 427 for (name, value) in tagtypes.iteritems()])
427 428 return (tags, tagtypes)
428 429
429 430 def tagtype(self, tagname):
430 431 '''
431 432 return the type of the given tag. result can be:
432 433
433 434 'local' : a local tag
434 435 'global' : a global tag
435 436 None : tag does not exist
436 437 '''
437 438
438 439 return self._tagscache.tagtypes.get(tagname)
439 440
440 441 def tagslist(self):
441 442 '''return a list of tags ordered by revision'''
442 443 if not self._tagscache.tagslist:
443 444 l = []
444 445 for t, n in self.tags().iteritems():
445 446 r = self.changelog.rev(n)
446 447 l.append((r, t, n))
447 448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 449
449 450 return self._tagscache.tagslist
450 451
451 452 def nodetags(self, node):
452 453 '''return the tags associated with a node'''
453 454 if not self._tagscache.nodetagscache:
454 455 nodetagscache = {}
455 456 for t, n in self.tags().iteritems():
456 457 nodetagscache.setdefault(n, []).append(t)
457 458 for tags in nodetagscache.itervalues():
458 459 tags.sort()
459 460 self._tagscache.nodetagscache = nodetagscache
460 461 return self._tagscache.nodetagscache.get(node, [])
461 462
462 463 def nodebookmarks(self, node):
463 464 marks = []
464 465 for bookmark, n in self._bookmarks.iteritems():
465 466 if n == node:
466 467 marks.append(bookmark)
467 468 return sorted(marks)
468 469
469 470 def _branchtags(self, partial, lrev):
470 471 # TODO: rename this function?
471 472 tiprev = len(self) - 1
472 473 if lrev != tiprev:
473 474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 475 self._updatebranchcache(partial, ctxgen)
475 476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 477
477 478 return partial
478 479
479 480 def updatebranchcache(self):
480 481 tip = self.changelog.tip()
481 482 if self._branchcache is not None and self._branchcachetip == tip:
482 483 return self._branchcache
483 484
484 485 oldtip = self._branchcachetip
485 486 self._branchcachetip = tip
486 487 if oldtip is None or oldtip not in self.changelog.nodemap:
487 488 partial, last, lrev = self._readbranchcache()
488 489 else:
489 490 lrev = self.changelog.rev(oldtip)
490 491 partial = self._branchcache
491 492
492 493 self._branchtags(partial, lrev)
493 494 # this private cache holds all heads (not just tips)
494 495 self._branchcache = partial
495 496
496 497 def branchmap(self):
497 498 '''returns a dictionary {branch: [branchheads]}'''
498 499 self.updatebranchcache()
499 500 return self._branchcache
500 501
501 502 def branchtags(self):
502 503 '''return a dict where branch names map to the tipmost head of
503 504 the branch, open heads come before closed'''
504 505 bt = {}
505 506 for bn, heads in self.branchmap().iteritems():
506 507 tip = heads[-1]
507 508 for h in reversed(heads):
508 509 if 'close' not in self.changelog.read(h)[5]:
509 510 tip = h
510 511 break
511 512 bt[bn] = tip
512 513 return bt
513 514
514 515 def _readbranchcache(self):
515 516 partial = {}
516 517 try:
517 518 f = self.opener("cache/branchheads")
518 519 lines = f.read().split('\n')
519 520 f.close()
520 521 except (IOError, OSError):
521 522 return {}, nullid, nullrev
522 523
523 524 try:
524 525 last, lrev = lines.pop(0).split(" ", 1)
525 526 last, lrev = bin(last), int(lrev)
526 527 if lrev >= len(self) or self[lrev].node() != last:
527 528 # invalidate the cache
528 529 raise ValueError('invalidating branch cache (tip differs)')
529 530 for l in lines:
530 531 if not l:
531 532 continue
532 533 node, label = l.split(" ", 1)
533 534 label = encoding.tolocal(label.strip())
534 535 partial.setdefault(label, []).append(bin(node))
535 536 except KeyboardInterrupt:
536 537 raise
537 538 except Exception, inst:
538 539 if self.ui.debugflag:
539 540 self.ui.warn(str(inst), '\n')
540 541 partial, last, lrev = {}, nullid, nullrev
541 542 return partial, last, lrev
542 543
543 544 def _writebranchcache(self, branches, tip, tiprev):
544 545 try:
545 546 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 547 f.write("%s %s\n" % (hex(tip), tiprev))
547 548 for label, nodes in branches.iteritems():
548 549 for node in nodes:
549 550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 551 f.close()
551 552 except (IOError, OSError):
552 553 pass
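    # Layout of .hg/cache/branchheads as written above (hashes shortened
    # for illustration): the first line ties the cache to a specific tip,
    # and each later line records one branch head:
    #
    #   <tip-hex> <tip-rev>
    #   <head-hex> default
    #   <head-hex> stable
    #
    # _readbranchcache() discards the whole file when that first line no
    # longer matches the current tip.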
553 554
554 555 def _updatebranchcache(self, partial, ctxgen):
555 556 # collect new branch entries
556 557 newbranches = {}
557 558 for c in ctxgen:
558 559 newbranches.setdefault(c.branch(), []).append(c.node())
559 560 # if older branchheads are reachable from new ones, they aren't
560 561 # really branchheads. Note checking parents is insufficient:
561 562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 563 for branch, newnodes in newbranches.iteritems():
563 564 bheads = partial.setdefault(branch, [])
564 565 bheads.extend(newnodes)
565 566 if len(bheads) <= 1:
566 567 continue
567 568 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 569 # starting from tip means fewer passes over reachable
569 570 while newnodes:
570 571 latest = newnodes.pop()
571 572 if latest not in bheads:
572 573 continue
573 574 minbhrev = self[bheads[0]].node()
574 575 reachable = self.changelog.reachable(latest, minbhrev)
575 576 reachable.remove(latest)
576 577 if reachable:
577 578 bheads = [b for b in bheads if b not in reachable]
578 579 partial[branch] = bheads
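        # Worked example for the note above (invented revs): with
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a), both 1 and 3 end up
        # as candidate heads of branch a, but 1 is reachable from 3, so the
        # reachability pass drops 1 and only 3 survives as a head.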
579 580
580 581 def lookup(self, key):
581 582 if isinstance(key, int):
582 583 return self.changelog.node(key)
583 584 elif key == '.':
584 585 return self.dirstate.p1()
585 586 elif key == 'null':
586 587 return nullid
587 588 elif key == 'tip':
588 589 return self.changelog.tip()
589 590 n = self.changelog._match(key)
590 591 if n:
591 592 return n
592 593 if key in self._bookmarks:
593 594 return self._bookmarks[key]
594 595 if key in self.tags():
595 596 return self.tags()[key]
596 597 if key in self.branchtags():
597 598 return self.branchtags()[key]
598 599 n = self.changelog._partialmatch(key)
599 600 if n:
600 601 return n
601 602
602 603 # can't find key, check if it might have come from damaged dirstate
603 604 if key in self.dirstate.parents():
604 605 raise error.Abort(_("working directory has unknown parent '%s'!")
605 606 % short(key))
606 607 try:
607 608 if len(key) == 20:
608 609 key = hex(key)
609 610 except TypeError:
610 611 pass
611 612 raise error.RepoLookupError(_("unknown revision '%s'") % key)
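        # Resolution order used above: integer revision, the '.', 'null'
        # and 'tip' aliases, an exact node or full-hex match, bookmarks,
        # tags, branch names, and finally an unambiguous hex prefix.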
612 613
613 614 def lookupbranch(self, key, remote=None):
614 615 repo = remote or self
615 616 if key in repo.branchmap():
616 617 return key
617 618
618 619 repo = (remote and remote.local()) and remote or self
619 620 return repo[key].branch()
620 621
621 622 def known(self, nodes):
622 623 nm = self.changelog.nodemap
623 624 return [(n in nm) for n in nodes]
624 625
625 626 def local(self):
626 627 return self
627 628
628 629 def join(self, f):
629 630 return os.path.join(self.path, f)
630 631
631 632 def wjoin(self, f):
632 633 return os.path.join(self.root, f)
633 634
634 635 def file(self, f):
635 636 if f[0] == '/':
636 637 f = f[1:]
637 638 return filelog.filelog(self.sopener, f)
638 639
639 640 def changectx(self, changeid):
640 641 return self[changeid]
641 642
642 643 def parents(self, changeid=None):
643 644 '''get list of changectxs for parents of changeid'''
644 645 return self[changeid].parents()
645 646
646 647 def filectx(self, path, changeid=None, fileid=None):
647 648 """changeid can be a changeset revision, node, or tag.
648 649 fileid can be a file revision or node."""
649 650 return context.filectx(self, path, changeid, fileid)
650 651
651 652 def getcwd(self):
652 653 return self.dirstate.getcwd()
653 654
654 655 def pathto(self, f, cwd=None):
655 656 return self.dirstate.pathto(f, cwd)
656 657
657 658 def wfile(self, f, mode='r'):
658 659 return self.wopener(f, mode)
659 660
660 661 def _link(self, f):
661 662 return os.path.islink(self.wjoin(f))
662 663
663 664 def _loadfilter(self, filter):
664 665 if filter not in self.filterpats:
665 666 l = []
666 667 for pat, cmd in self.ui.configitems(filter):
667 668 if cmd == '!':
668 669 continue
669 670 mf = matchmod.match(self.root, '', [pat])
670 671 fn = None
671 672 params = cmd
672 673 for name, filterfn in self._datafilters.iteritems():
673 674 if cmd.startswith(name):
674 675 fn = filterfn
675 676 params = cmd[len(name):].lstrip()
676 677 break
677 678 if not fn:
678 679 fn = lambda s, c, **kwargs: util.filter(s, c)
679 680 # Wrap old filters not supporting keyword arguments
680 681 if not inspect.getargspec(fn)[2]:
681 682 oldfn = fn
682 683 fn = lambda s, c, **kwargs: oldfn(s, c)
683 684 l.append((mf, fn, params))
684 685 self.filterpats[filter] = l
685 686 return self.filterpats[filter]
686 687
687 688 def _filter(self, filterpats, filename, data):
688 689 for mf, fn, cmd in filterpats:
689 690 if mf(filename):
690 691 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 692 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 693 break
693 694
694 695 return data
695 696
696 697 @propertycache
697 698 def _encodefilterpats(self):
698 699 return self._loadfilter('encode')
699 700
700 701 @propertycache
701 702 def _decodefilterpats(self):
702 703 return self._loadfilter('decode')
703 704
704 705 def adddatafilter(self, name, filter):
705 706 self._datafilters[name] = filter
706 707
707 708 def wread(self, filename):
708 709 if self._link(filename):
709 710 data = os.readlink(self.wjoin(filename))
710 711 else:
711 712 data = self.wopener.read(filename)
712 713 return self._filter(self._encodefilterpats, filename, data)
713 714
714 715 def wwrite(self, filename, data, flags):
715 716 data = self._filter(self._decodefilterpats, filename, data)
716 717 if 'l' in flags:
717 718 self.wopener.symlink(data, filename)
718 719 else:
719 720 self.wopener.write(filename, data)
720 721 if 'x' in flags:
721 722 util.setflags(self.wjoin(filename), False, True)
722 723
723 724 def wwritedata(self, filename, data):
724 725 return self._filter(self._decodefilterpats, filename, data)
725 726
726 727 def transaction(self, desc):
727 728 tr = self._transref and self._transref() or None
728 729 if tr and tr.running():
729 730 return tr.nest()
730 731
731 732 # abort here if the journal already exists
732 733 if os.path.exists(self.sjoin("journal")):
733 734 raise error.RepoError(
734 735 _("abandoned transaction found - run hg recover"))
735 736
736 737 journalfiles = self._writejournal(desc)
737 738 renames = [(x, undoname(x)) for x in journalfiles]
738 739
739 740 tr = transaction.transaction(self.ui.warn, self.sopener,
740 741 self.sjoin("journal"),
741 742 aftertrans(renames),
742 743 self.store.createmode)
743 744 self._transref = weakref.ref(tr)
744 745 return tr
745 746
746 747 def _writejournal(self, desc):
747 748 # save dirstate for rollback
748 749 try:
749 750 ds = self.opener.read("dirstate")
750 751 except IOError:
751 752 ds = ""
752 753 self.opener.write("journal.dirstate", ds)
753 754 self.opener.write("journal.branch",
754 755 encoding.fromlocal(self.dirstate.branch()))
755 756 self.opener.write("journal.desc",
756 757 "%d\n%s\n" % (len(self), desc))
757 758
758 759 bkname = self.join('bookmarks')
759 760 if os.path.exists(bkname):
760 761 util.copyfile(bkname, self.join('journal.bookmarks'))
761 762 else:
762 763 self.opener.write('journal.bookmarks', '')
763 764 phasesname = self.sjoin('phaseroots')
764 765 if os.path.exists(phasesname):
765 766 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 767 else:
767 768 self.sopener.write('journal.phaseroots', '')
768 769
769 770 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 771 self.join('journal.branch'), self.join('journal.desc'),
771 772 self.join('journal.bookmarks'),
772 773 self.sjoin('journal.phaseroots'))
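        # After a successful transaction these journal.* files are renamed
        # to their undo.* counterparts (via the rename list that
        # transaction() builds with undoname()), which is what _rollback()
        # below restores the dirstate, branch, bookmarks and phase roots
        # from.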
773 774
774 775 def recover(self):
775 776 lock = self.lock()
776 777 try:
777 778 if os.path.exists(self.sjoin("journal")):
778 779 self.ui.status(_("rolling back interrupted transaction\n"))
779 780 transaction.rollback(self.sopener, self.sjoin("journal"),
780 781 self.ui.warn)
781 782 self.invalidate()
782 783 return True
783 784 else:
784 785 self.ui.warn(_("no interrupted transaction available\n"))
785 786 return False
786 787 finally:
787 788 lock.release()
788 789
789 790 def rollback(self, dryrun=False, force=False):
790 791 wlock = lock = None
791 792 try:
792 793 wlock = self.wlock()
793 794 lock = self.lock()
794 795 if os.path.exists(self.sjoin("undo")):
795 796 return self._rollback(dryrun, force)
796 797 else:
797 798 self.ui.warn(_("no rollback information available\n"))
798 799 return 1
799 800 finally:
800 801 release(lock, wlock)
801 802
802 803 def _rollback(self, dryrun, force):
803 804 ui = self.ui
804 805 try:
805 806 args = self.opener.read('undo.desc').splitlines()
806 807 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 808 if len(args) >= 3:
808 809 detail = args[2]
809 810 oldtip = oldlen - 1
810 811
811 812 if detail and ui.verbose:
812 813 msg = (_('repository tip rolled back to revision %s'
813 814 ' (undo %s: %s)\n')
814 815 % (oldtip, desc, detail))
815 816 else:
816 817 msg = (_('repository tip rolled back to revision %s'
817 818 ' (undo %s)\n')
818 819 % (oldtip, desc))
819 820 except IOError:
820 821 msg = _('rolling back unknown transaction\n')
821 822 desc = None
822 823
823 824 if not force and self['.'] != self['tip'] and desc == 'commit':
824 825 raise util.Abort(
825 826 _('rollback of last commit while not checked out '
826 827 'may lose data'), hint=_('use -f to force'))
827 828
828 829 ui.status(msg)
829 830 if dryrun:
830 831 return 0
831 832
832 833 parents = self.dirstate.parents()
833 834 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 835 if os.path.exists(self.join('undo.bookmarks')):
835 836 util.rename(self.join('undo.bookmarks'),
836 837 self.join('bookmarks'))
837 838 if os.path.exists(self.sjoin('undo.phaseroots')):
838 839 util.rename(self.sjoin('undo.phaseroots'),
839 840 self.sjoin('phaseroots'))
840 841 self.invalidate()
841 842
842 843 parentgone = (parents[0] not in self.changelog.nodemap or
843 844 parents[1] not in self.changelog.nodemap)
844 845 if parentgone:
845 846 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 847 try:
847 848 branch = self.opener.read('undo.branch')
848 849 self.dirstate.setbranch(branch)
849 850 except IOError:
850 851 ui.warn(_('named branch could not be reset: '
851 852 'current branch is still \'%s\'\n')
852 853 % self.dirstate.branch())
853 854
854 855 self.dirstate.invalidate()
855 856 parents = tuple([p.rev() for p in self.parents()])
856 857 if len(parents) > 1:
857 858 ui.status(_('working directory now based on '
858 859 'revisions %d and %d\n') % parents)
859 860 else:
860 861 ui.status(_('working directory now based on '
861 862 'revision %d\n') % parents)
862 863 self.destroyed()
863 864 return 0
864 865
865 866 def invalidatecaches(self):
866 867 try:
867 868 delattr(self, '_tagscache')
868 869 except AttributeError:
869 870 pass
870 871
871 872 self._branchcache = None # in UTF-8
872 873 self._branchcachetip = None
873 874
874 875 def invalidatedirstate(self):
875 876 '''Invalidates the dirstate, causing the next call to dirstate
876 877 to check if it was modified since the last time it was read,
877 878 rereading it if it has.
878 879
879 880         This is different from dirstate.invalidate() in that it doesn't
880 881         always reread the dirstate. Use dirstate.invalidate() if you want to
881 882 explicitly read the dirstate again (i.e. restoring it to a previous
882 883 known good state).'''
883 884 try:
884 885 delattr(self, 'dirstate')
885 886 except AttributeError:
886 887 pass
887 888
888 889 def invalidate(self):
889 890 for k in self._filecache:
890 891 # dirstate is invalidated separately in invalidatedirstate()
891 892 if k == 'dirstate':
892 893 continue
893 894
894 895 try:
895 896 delattr(self, k)
896 897 except AttributeError:
897 898 pass
898 899 self.invalidatecaches()
899 900
900 901 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 902 try:
902 903 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 904 except error.LockHeld, inst:
904 905 if not wait:
905 906 raise
906 907 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 908 (desc, inst.locker))
908 909 # default to 600 seconds timeout
909 910 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 911 releasefn, desc=desc)
911 912 if acquirefn:
912 913 acquirefn()
913 914 return l
914 915
915 916 def _afterlock(self, callback):
916 917 """add a callback to the current repository lock.
917 918
918 919 The callback will be executed on lock release."""
919 920 l = self._lockref and self._lockref()
920 921 if l:
921 922 l.postrelease.append(callback)
922 923
923 924 def lock(self, wait=True):
924 925 '''Lock the repository store (.hg/store) and return a weak reference
925 926 to the lock. Use this before modifying the store (e.g. committing or
926 927         stripping). If you are opening a transaction, get a lock as well.'''
927 928 l = self._lockref and self._lockref()
928 929 if l is not None and l.held:
929 930 l.lock()
930 931 return l
931 932
932 933 def unlock():
933 934 self.store.write()
934 935 if self._dirtyphases:
935 936 phases.writeroots(self)
936 937 for k, ce in self._filecache.items():
937 938 if k == 'dirstate':
938 939 continue
939 940 ce.refresh()
940 941
941 942 l = self._lock(self.sjoin("lock"), wait, unlock,
942 943 self.invalidate, _('repository %s') % self.origroot)
943 944 self._lockref = weakref.ref(l)
944 945 return l
945 946
946 947 def wlock(self, wait=True):
947 948 '''Lock the non-store parts of the repository (everything under
948 949 .hg except .hg/store) and return a weak reference to the lock.
949 950 Use this before modifying files in .hg.'''
950 951 l = self._wlockref and self._wlockref()
951 952 if l is not None and l.held:
952 953 l.lock()
953 954 return l
954 955
955 956 def unlock():
956 957 self.dirstate.write()
957 958 ce = self._filecache.get('dirstate')
958 959 if ce:
959 960 ce.refresh()
960 961
961 962 l = self._lock(self.join("wlock"), wait, unlock,
962 963 self.invalidatedirstate, _('working directory of %s') %
963 964 self.origroot)
964 965 self._wlockref = weakref.ref(l)
965 966 return l
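        # When both locks are needed, the order used throughout this file
        # (see rollback() above) is wlock() first, then lock(): the working
        # directory lock is always taken before the store lock.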
966 967
967 968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
968 969 """
969 970 commit an individual file as part of a larger transaction
970 971 """
971 972
972 973 fname = fctx.path()
973 974 text = fctx.data()
974 975 flog = self.file(fname)
975 976 fparent1 = manifest1.get(fname, nullid)
976 977 fparent2 = fparent2o = manifest2.get(fname, nullid)
977 978
978 979 meta = {}
979 980 copy = fctx.renamed()
980 981 if copy and copy[0] != fname:
981 982 # Mark the new revision of this file as a copy of another
982 983 # file. This copy data will effectively act as a parent
983 984 # of this new revision. If this is a merge, the first
984 985 # parent will be the nullid (meaning "look up the copy data")
985 986 # and the second one will be the other parent. For example:
986 987 #
987 988 # 0 --- 1 --- 3 rev1 changes file foo
988 989 # \ / rev2 renames foo to bar and changes it
989 990 # \- 2 -/ rev3 should have bar with all changes and
990 991 # should record that bar descends from
991 992 # bar in rev2 and foo in rev1
992 993 #
993 994 # this allows this merge to succeed:
994 995 #
995 996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
996 997 # \ / merging rev3 and rev4 should use bar@rev2
997 998 # \- 2 --- 4 as the merge base
998 999 #
999 1000
1000 1001 cfname = copy[0]
1001 1002 crev = manifest1.get(cfname)
1002 1003 newfparent = fparent2
1003 1004
1004 1005 if manifest2: # branch merge
1005 1006 if fparent2 == nullid or crev is None: # copied on remote side
1006 1007 if cfname in manifest2:
1007 1008 crev = manifest2[cfname]
1008 1009 newfparent = fparent1
1009 1010
1010 1011 # find source in nearest ancestor if we've lost track
1011 1012 if not crev:
1012 1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1013 1014 (fname, cfname))
1014 1015 for ancestor in self[None].ancestors():
1015 1016 if cfname in ancestor:
1016 1017 crev = ancestor[cfname].filenode()
1017 1018 break
1018 1019
1019 1020 if crev:
1020 1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1021 1022 meta["copy"] = cfname
1022 1023 meta["copyrev"] = hex(crev)
1023 1024 fparent1, fparent2 = nullid, newfparent
1024 1025 else:
1025 1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1026 1027 "copied from '%s'!\n") % (fname, cfname))
1027 1028
1028 1029 elif fparent2 != nullid:
1029 1030 # is one parent an ancestor of the other?
1030 1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1031 1032 if fparentancestor == fparent1:
1032 1033 fparent1, fparent2 = fparent2, nullid
1033 1034 elif fparentancestor == fparent2:
1034 1035 fparent2 = nullid
1035 1036
1036 1037 # is the file changed?
1037 1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1038 1039 changelist.append(fname)
1039 1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1040 1041
1041 1042 # are just the flags changed during merge?
1042 1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1043 1044 changelist.append(fname)
1044 1045
1045 1046 return fparent1
1046 1047
1047 1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 1049 editor=False, extra={}):
1049 1050 """Add a new revision to current repository.
1050 1051
1051 1052 Revision information is gathered from the working directory,
1052 1053 match can be used to filter the committed files. If editor is
1053 1054 supplied, it is called to get a commit message.
1054 1055 """
1055 1056
1056 1057 def fail(f, msg):
1057 1058 raise util.Abort('%s: %s' % (f, msg))
1058 1059
1059 1060 if not match:
1060 1061 match = matchmod.always(self.root, '')
1061 1062
1062 1063 if not force:
1063 1064 vdirs = []
1064 1065 match.dir = vdirs.append
1065 1066 match.bad = fail
1066 1067
1067 1068 wlock = self.wlock()
1068 1069 try:
1069 1070 wctx = self[None]
1070 1071 merge = len(wctx.parents()) > 1
1071 1072
1072 1073 if (not force and merge and match and
1073 1074 (match.files() or match.anypats())):
1074 1075 raise util.Abort(_('cannot partially commit a merge '
1075 1076 '(do not specify files or patterns)'))
1076 1077
1077 1078 changes = self.status(match=match, clean=force)
1078 1079 if force:
1079 1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1080 1081
1081 1082 # check subrepos
1082 1083 subs = []
1083 1084 removedsubs = set()
1084 1085 if '.hgsub' in wctx:
1085 1086 # only manage subrepos and .hgsubstate if .hgsub is present
1086 1087 for p in wctx.parents():
1087 1088 removedsubs.update(s for s in p.substate if match(s))
1088 1089 for s in wctx.substate:
1089 1090 removedsubs.discard(s)
1090 1091 if match(s) and wctx.sub(s).dirty():
1091 1092 subs.append(s)
1092 1093 if (subs or removedsubs):
1093 1094 if (not match('.hgsub') and
1094 1095 '.hgsub' in (wctx.modified() + wctx.added())):
1095 1096 raise util.Abort(
1096 1097 _("can't commit subrepos without .hgsub"))
1097 1098 if '.hgsubstate' not in changes[0]:
1098 1099 changes[0].insert(0, '.hgsubstate')
1099 1100 if '.hgsubstate' in changes[2]:
1100 1101 changes[2].remove('.hgsubstate')
1101 1102 elif '.hgsub' in changes[2]:
1102 1103 # clean up .hgsubstate when .hgsub is removed
1103 1104 if ('.hgsubstate' in wctx and
1104 1105 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1105 1106 changes[2].insert(0, '.hgsubstate')
1106 1107
1107 1108 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1108 1109 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1109 1110 if changedsubs:
1110 1111 raise util.Abort(_("uncommitted changes in subrepo %s")
1111 1112 % changedsubs[0],
1112 1113 hint=_("use --subrepos for recursive commit"))
1113 1114
1114 1115 # make sure all explicit patterns are matched
1115 1116 if not force and match.files():
1116 1117 matched = set(changes[0] + changes[1] + changes[2])
1117 1118
1118 1119 for f in match.files():
1119 1120 if f == '.' or f in matched or f in wctx.substate:
1120 1121 continue
1121 1122 if f in changes[3]: # missing
1122 1123 fail(f, _('file not found!'))
1123 1124 if f in vdirs: # visited directory
1124 1125 d = f + '/'
1125 1126 for mf in matched:
1126 1127 if mf.startswith(d):
1127 1128 break
1128 1129 else:
1129 1130 fail(f, _("no match under directory!"))
1130 1131 elif f not in self.dirstate:
1131 1132 fail(f, _("file not tracked!"))
1132 1133
1133 1134 if (not force and not extra.get("close") and not merge
1134 1135 and not (changes[0] or changes[1] or changes[2])
1135 1136 and wctx.branch() == wctx.p1().branch()):
1136 1137 return None
1137 1138
1138 1139 ms = mergemod.mergestate(self)
1139 1140 for f in changes[0]:
1140 1141 if f in ms and ms[f] == 'u':
1141 1142 raise util.Abort(_("unresolved merge conflicts "
1142 1143 "(see hg help resolve)"))
1143 1144
1144 1145 cctx = context.workingctx(self, text, user, date, extra, changes)
1145 1146 if editor:
1146 1147 cctx._text = editor(self, cctx, subs)
1147 1148 edited = (text != cctx._text)
1148 1149
1149 1150 # commit subs
1150 1151 if subs or removedsubs:
1151 1152 state = wctx.substate.copy()
1152 1153 for s in sorted(subs):
1153 1154 sub = wctx.sub(s)
1154 1155 self.ui.status(_('committing subrepository %s\n') %
1155 1156 subrepo.subrelpath(sub))
1156 1157 sr = sub.commit(cctx._text, user, date)
1157 1158 state[s] = (state[s][0], sr)
1158 1159 subrepo.writestate(self, state)
1159 1160
1160 1161 # Save commit message in case this transaction gets rolled back
1161 1162 # (e.g. by a pretxncommit hook). Leave the content alone on
1162 1163 # the assumption that the user will use the same editor again.
1163 1164 msgfn = self.savecommitmessage(cctx._text)
1164 1165
1165 1166 p1, p2 = self.dirstate.parents()
1166 1167 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1167 1168 try:
1168 1169 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1169 1170 ret = self.commitctx(cctx, True)
1170 1171 except:
1171 1172 if edited:
1172 1173 self.ui.write(
1173 1174 _('note: commit message saved in %s\n') % msgfn)
1174 1175 raise
1175 1176
1176 1177 # update bookmarks, dirstate and mergestate
1177 1178 bookmarks.update(self, p1, ret)
1178 1179 for f in changes[0] + changes[1]:
1179 1180 self.dirstate.normal(f)
1180 1181 for f in changes[2]:
1181 1182 self.dirstate.drop(f)
1182 1183 self.dirstate.setparents(ret)
1183 1184 ms.reset()
1184 1185 finally:
1185 1186 wlock.release()
1186 1187
1187 1188 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1188 1189 return ret
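        # Minimal invocation (names invented): commit whatever is modified
        # in the working directory, getting None back when there is nothing
        # to commit:
        #
        #   m = matchmod.always(repo.root, '')
        #   node = repo.commit('fix encoding bug', user='alice', match=m)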
1189 1190
1190 1191 def commitctx(self, ctx, error=False):
1191 1192 """Add a new revision to current repository.
1192 1193 Revision information is passed via the context argument.
1193 1194 """
1194 1195
1195 1196 tr = lock = None
1196 1197 removed = list(ctx.removed())
1197 1198 p1, p2 = ctx.p1(), ctx.p2()
1198 1199 user = ctx.user()
1199 1200
1200 1201 lock = self.lock()
1201 1202 try:
1202 1203 tr = self.transaction("commit")
1203 1204 trp = weakref.proxy(tr)
1204 1205
1205 1206 if ctx.files():
1206 1207 m1 = p1.manifest().copy()
1207 1208 m2 = p2.manifest()
1208 1209
1209 1210 # check in files
1210 1211 new = {}
1211 1212 changed = []
1212 1213 linkrev = len(self)
1213 1214 for f in sorted(ctx.modified() + ctx.added()):
1214 1215 self.ui.note(f + "\n")
1215 1216 try:
1216 1217 fctx = ctx[f]
1217 1218 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1218 1219 changed)
1219 1220 m1.set(f, fctx.flags())
1220 1221 except OSError, inst:
1221 1222 self.ui.warn(_("trouble committing %s!\n") % f)
1222 1223 raise
1223 1224 except IOError, inst:
1224 1225 errcode = getattr(inst, 'errno', errno.ENOENT)
1225 1226 if error or errcode and errcode != errno.ENOENT:
1226 1227 self.ui.warn(_("trouble committing %s!\n") % f)
1227 1228 raise
1228 1229 else:
1229 1230 removed.append(f)
1230 1231
1231 1232 # update manifest
1232 1233 m1.update(new)
1233 1234 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1234 1235 drop = [f for f in removed if f in m1]
1235 1236 for f in drop:
1236 1237 del m1[f]
1237 1238 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1238 1239 p2.manifestnode(), (new, drop))
1239 1240 files = changed + removed
1240 1241 else:
1241 1242 mn = p1.manifestnode()
1242 1243 files = []
1243 1244
1244 1245 # update changelog
1245 1246 self.changelog.delayupdate()
1246 1247 n = self.changelog.add(mn, files, ctx.description(),
1247 1248 trp, p1.node(), p2.node(),
1248 1249 user, ctx.date(), ctx.extra().copy())
1249 1250 p = lambda: self.changelog.writepending() and self.root or ""
1250 1251 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1251 1252 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1252 1253 parent2=xp2, pending=p)
1253 1254 self.changelog.finalize(trp)
1254 1255             # set the new commit in its proper phase
1255 1256 targetphase = self.ui.configint('phases', 'new-commit', 1)
1256 1257 if targetphase:
1257 1258                 # retracting the boundary does not alter parent changesets;
1258 1259                 # if a parent has a higher phase, the resulting phase will
1259 1260                 # be compliant anyway
1260 1261 #
1261 1262 # if minimal phase was 0 we don't need to retract anything
1262 1263 phases.retractboundary(self, targetphase, [n])
1263 1264 tr.close()
1264 1265
1265 1266 if self._branchcache:
1266 1267 self.updatebranchcache()
1267 1268 return n
1268 1269 finally:
1269 1270 if tr:
1270 1271 tr.release()
1271 1272 lock.release()
1272 1273
1273 1274 def destroyed(self):
1274 1275 '''Inform the repository that nodes have been destroyed.
1275 1276 Intended for use by strip and rollback, so there's a common
1276 1277 place for anything that has to be done after destroying history.'''
1277 1278 # XXX it might be nice if we could take the list of destroyed
1278 1279 # nodes, but I don't see an easy way for rollback() to do that
1279 1280
1280 1281 # Ensure the persistent tag cache is updated. Doing it now
1281 1282 # means that the tag cache only has to worry about destroyed
1282 1283 # heads immediately after a strip/rollback. That in turn
1283 1284 # guarantees that "cachetip == currenttip" (comparing both rev
1284 1285 # and node) always means no nodes have been added or destroyed.
1285 1286
1286 1287 # XXX this is suboptimal when qrefresh'ing: we strip the current
1287 1288 # head, refresh the tag cache, then immediately add a new head.
1288 1289 # But I think doing it this way is necessary for the "instant
1289 1290 # tag cache retrieval" case to work.
1290 1291 self.invalidatecaches()
1291 1292
1292 1293 def walk(self, match, node=None):
1293 1294 '''
1294 1295 walk recursively through the directory tree or a given
1295 1296 changeset, finding all files matched by the match
1296 1297 function
1297 1298 '''
1298 1299 return self[node].walk(match)
1299 1300
1300 1301 def status(self, node1='.', node2=None, match=None,
1301 1302 ignored=False, clean=False, unknown=False,
1302 1303 listsubrepos=False):
1303 1304 """return status of files between two nodes or node and working directory
1304 1305
1305 1306 If node1 is None, use the first dirstate parent instead.
1306 1307 If node2 is None, compare node1 with working directory.
1307 1308 """
1308 1309
1309 1310 def mfmatches(ctx):
1310 1311 mf = ctx.manifest().copy()
1311 1312 for fn in mf.keys():
1312 1313 if not match(fn):
1313 1314 del mf[fn]
1314 1315 return mf
1315 1316
1316 1317 if isinstance(node1, context.changectx):
1317 1318 ctx1 = node1
1318 1319 else:
1319 1320 ctx1 = self[node1]
1320 1321 if isinstance(node2, context.changectx):
1321 1322 ctx2 = node2
1322 1323 else:
1323 1324 ctx2 = self[node2]
1324 1325
1325 1326 working = ctx2.rev() is None
1326 1327 parentworking = working and ctx1 == self['.']
1327 1328 match = match or matchmod.always(self.root, self.getcwd())
1328 1329 listignored, listclean, listunknown = ignored, clean, unknown
1329 1330
1330 1331 # load earliest manifest first for caching reasons
1331 1332 if not working and ctx2.rev() < ctx1.rev():
1332 1333 ctx2.manifest()
1333 1334
1334 1335 if not parentworking:
1335 1336 def bad(f, msg):
1336 1337 if f not in ctx1:
1337 1338 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1338 1339 match.bad = bad
1339 1340
1340 1341 if working: # we need to scan the working dir
1341 1342 subrepos = []
1342 1343 if '.hgsub' in self.dirstate:
1343 1344 subrepos = ctx2.substate.keys()
1344 1345 s = self.dirstate.status(match, subrepos, listignored,
1345 1346 listclean, listunknown)
1346 1347 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1347 1348
1348 1349 # check for any possibly clean files
1349 1350 if parentworking and cmp:
1350 1351 fixup = []
1351 1352 # do a full compare of any files that might have changed
1352 1353 for f in sorted(cmp):
1353 1354 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1354 1355 or ctx1[f].cmp(ctx2[f])):
1355 1356 modified.append(f)
1356 1357 else:
1357 1358 fixup.append(f)
1358 1359
1359 1360 # update dirstate for files that are actually clean
1360 1361 if fixup:
1361 1362 if listclean:
1362 1363 clean += fixup
1363 1364
1364 1365 try:
1365 1366 # updating the dirstate is optional
1366 1367 # so we don't wait on the lock
1367 1368 wlock = self.wlock(False)
1368 1369 try:
1369 1370 for f in fixup:
1370 1371 self.dirstate.normal(f)
1371 1372 finally:
1372 1373 wlock.release()
1373 1374 except error.LockError:
1374 1375 pass
1375 1376
1376 1377 if not parentworking:
1377 1378 mf1 = mfmatches(ctx1)
1378 1379 if working:
1379 1380 # we are comparing working dir against non-parent
1380 1381 # generate a pseudo-manifest for the working dir
1381 1382 mf2 = mfmatches(self['.'])
1382 1383 for f in cmp + modified + added:
1383 1384 mf2[f] = None
1384 1385 mf2.set(f, ctx2.flags(f))
1385 1386 for f in removed:
1386 1387 if f in mf2:
1387 1388 del mf2[f]
1388 1389 else:
1389 1390 # we are comparing two revisions
1390 1391 deleted, unknown, ignored = [], [], []
1391 1392 mf2 = mfmatches(ctx2)
1392 1393
1393 1394 modified, added, clean = [], [], []
1394 1395 for fn in mf2:
1395 1396 if fn in mf1:
1396 1397 if (fn not in deleted and
1397 1398 (mf1.flags(fn) != mf2.flags(fn) or
1398 1399 (mf1[fn] != mf2[fn] and
1399 1400 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1400 1401 modified.append(fn)
1401 1402 elif listclean:
1402 1403 clean.append(fn)
1403 1404 del mf1[fn]
1404 1405 elif fn not in deleted:
1405 1406 added.append(fn)
1406 1407 removed = mf1.keys()
1407 1408
1408 1409 if working and modified and not self.dirstate._checklink:
1409 1410 # Symlink placeholders may get non-symlink-like contents
1410 1411 # via user error or dereferencing by NFS or Samba servers,
1411 1412 # so we filter out any placeholders that don't look like a
1412 1413 # symlink
1413 1414 sane = []
1414 1415 for f in modified:
1415 1416 if ctx2.flags(f) == 'l':
1416 1417 d = ctx2[f].data()
1417 1418 if len(d) >= 1024 or '\n' in d or util.binary(d):
1418 1419 self.ui.debug('ignoring suspect symlink placeholder'
1419 1420 ' "%s"\n' % f)
1420 1421 continue
1421 1422 sane.append(f)
1422 1423 modified = sane
1423 1424
1424 1425 r = modified, added, removed, deleted, unknown, ignored, clean
1425 1426
1426 1427 if listsubrepos:
1427 1428 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1428 1429 if working:
1429 1430 rev2 = None
1430 1431 else:
1431 1432 rev2 = ctx2.substate[subpath][1]
1432 1433 try:
1433 1434 submatch = matchmod.narrowmatcher(subpath, match)
1434 1435 s = sub.status(rev2, match=submatch, ignored=listignored,
1435 1436 clean=listclean, unknown=listunknown,
1436 1437 listsubrepos=True)
1437 1438 for rfiles, sfiles in zip(r, s):
1438 1439 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1439 1440 except error.LookupError:
1440 1441 self.ui.status(_("skipping missing subrepository: %s\n")
1441 1442 % subpath)
1442 1443
1443 1444 for l in r:
1444 1445 l.sort()
1445 1446 return r
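        # r is a fixed 7-item list: modified, added, removed, deleted,
        # unknown, ignored, clean; the last three are only populated when
        # the corresponding listing flag is passed. For example, the
        # status()[:5] slice used by tag() above covers everything except
        # ignored and clean.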
1446 1447
1447 1448 def heads(self, start=None):
1448 1449 heads = self.changelog.heads(start)
1449 1450 # sort the output in rev descending order
1450 1451 return sorted(heads, key=self.changelog.rev, reverse=True)
1451 1452
1452 1453 def branchheads(self, branch=None, start=None, closed=False):
1453 1454 '''return a (possibly filtered) list of heads for the given branch
1454 1455
1455 1456 Heads are returned in topological order, from newest to oldest.
1456 1457 If branch is None, use the dirstate branch.
1457 1458 If start is not None, return only heads reachable from start.
1458 1459 If closed is True, return heads that are marked as closed as well.
1459 1460 '''
1460 1461 if branch is None:
1461 1462 branch = self[None].branch()
1462 1463 branches = self.branchmap()
1463 1464 if branch not in branches:
1464 1465 return []
1465 1466 # the cache returns heads ordered lowest to highest
1466 1467 bheads = list(reversed(branches[branch]))
1467 1468 if start is not None:
1468 1469 # filter out the heads that cannot be reached from startrev
1469 1470 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1470 1471 bheads = [h for h in bheads if h in fbheads]
1471 1472 if not closed:
1472 1473 bheads = [h for h in bheads if
1473 1474 ('close' not in self.changelog.read(h)[5])]
1474 1475 return bheads
1475 1476
1476 1477 def branches(self, nodes):
1477 1478 if not nodes:
1478 1479 nodes = [self.changelog.tip()]
1479 1480 b = []
1480 1481 for n in nodes:
1481 1482 t = n
1482 1483 while True:
1483 1484 p = self.changelog.parents(n)
1484 1485 if p[1] != nullid or p[0] == nullid:
1485 1486 b.append((t, n, p[0], p[1]))
1486 1487 break
1487 1488 n = p[0]
1488 1489 return b
1489 1490
1490 1491 def between(self, pairs):
1491 1492 r = []
1492 1493
1493 1494 for top, bottom in pairs:
1494 1495 n, l, i = top, [], 0
1495 1496 f = 1
1496 1497
1497 1498 while n != bottom and n != nullid:
1498 1499 p = self.changelog.parents(n)[0]
1499 1500 if i == f:
1500 1501 l.append(n)
1501 1502 f = f * 2
1502 1503 n = p
1503 1504 i += 1
1504 1505
1505 1506 r.append(l)
1506 1507
1507 1508 return r
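        # For each (top, bottom) pair this samples ancestors of top at
        # exponentially growing distances (1, 2, 4, 8, ...), which is how
        # the legacy discovery protocol narrows down common revisions; e.g.
        # in a linear history with top at rev 9, the list holds revs 8, 7,
        # 5 and 1.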
1508 1509
1509 1510 def pull(self, remote, heads=None, force=False):
1510 1511 lock = self.lock()
1511 1512 try:
1512 1513 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1513 1514 force=force)
1514 1515 common, fetch, rheads = tmp
1515 1516 if not fetch:
1516 1517 self.ui.status(_("no changes found\n"))
1517 1518 added = []
1518 1519 result = 0
1519 1520 else:
1520 1521 if heads is None and list(common) == [nullid]:
1521 1522 self.ui.status(_("requesting all changes\n"))
1522 1523 elif heads is None and remote.capable('changegroupsubset'):
1523 1524 # issue1320, avoid a race if remote changed after discovery
1524 1525 heads = rheads
1525 1526
1526 1527 if remote.capable('getbundle'):
1527 1528 cg = remote.getbundle('pull', common=common,
1528 1529 heads=heads or rheads)
1529 1530 elif heads is None:
1530 1531 cg = remote.changegroup(fetch, 'pull')
1531 1532 elif not remote.capable('changegroupsubset'):
1532 1533 raise util.Abort(_("partial pull cannot be done because "
1533 1534 "other repository doesn't support "
1534 1535 "changegroupsubset."))
1535 1536 else:
1536 1537 cg = remote.changegroupsubset(fetch, heads, 'pull')
1537 1538 clstart = len(self.changelog)
1538 1539 result = self.addchangegroup(cg, 'pull', remote.url())
1539 1540 clend = len(self.changelog)
1540 1541 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1541 1542
1542 1543
1543 1544 # Get remote phases data from remote
1544 1545 remotephases = remote.listkeys('phases')
1545 1546 publishing = bool(remotephases.get('publishing', False))
1546 1547 if remotephases and not publishing:
1547 1548                 # remote is new and non-publishing
1548 1549 subset = common + added
1549 1550 rheads, rroots = phases.analyzeremotephases(self, subset,
1550 1551 remotephases)
1551 1552 for phase, boundary in enumerate(rheads):
1552 1553 phases.advanceboundary(self, phase, boundary)
1553 1554 else:
1554 1555                 # Remote is old or publishing; all common changesets
1555 1556                 # should be seen as public
1556 1557 phases.advanceboundary(self, 0, common + added)
1557 1558 finally:
1558 1559 lock.release()
1559 1560
1560 1561 return result
1561 1562
1562 1563 def checkpush(self, force, revs):
1563 1564 """Extensions can override this function if additional checks have
1564 1565 to be performed before pushing, or call it if they override push
1565 1566 command.
1566 1567 """
1567 1568 pass
1568 1569
1569 1570 def push(self, remote, force=False, revs=None, newbranch=False):
1570 1571 '''Push outgoing changesets (limited by revs) from the current
1571 1572 repository to remote. Return an integer:
1572 1573 - 0 means HTTP error *or* nothing to push
1573 1574 - 1 means we pushed and remote head count is unchanged *or*
1574 1575 we have outgoing changesets but refused to push
1575 1576 - other values as described by addchangegroup()
1576 1577 '''
1577 1578 # there are two ways to push to remote repo:
1578 1579 #
1579 1580 # addchangegroup assumes local user can lock remote
1580 1581 # repo (local filesystem, old ssh servers).
1581 1582 #
1582 1583 # unbundle assumes local user cannot lock remote repo (new ssh
1583 1584 # servers, http servers).
1584 1585
1585 1586 self.checkpush(force, revs)
1586 1587 lock = None
1587 1588 unbundle = remote.capable('unbundle')
1588 1589 if not unbundle:
1589 1590 lock = remote.lock()
1590 1591 try:
1591 1592 # get local lock as we might write phase data
1592 1593 locallock = self.lock()
1593 1594 try:
1594 1595 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1595 1596 revs, newbranch)
1596 1597 ret = remote_heads
1597 1598 # create a callback for addchangegroup.
1598 1599 # It will be used in the branches of the conditional below too.
1599 1600 if cg is not None:
1600 1601 if unbundle:
1601 1602 # local repo finds heads on server, finds out what
1602 1603 # revs it must push. once revs transferred, if server
1603 1604 # finds it has different heads (someone else won
1604 1605 # commit/push race), server aborts.
1605 1606 if force:
1606 1607 remote_heads = ['force']
1607 1608 # ssh: return remote's addchangegroup()
1608 1609 # http: return remote's addchangegroup() or 0 for error
1609 1610 ret = remote.unbundle(cg, remote_heads, 'push')
1610 1611 else:
1611 1612 # we return an integer indicating remote head count change
1612 1613 ret = remote.addchangegroup(cg, 'push', self.url())
1613 1614
1614 1615 # even when we don't push, exchanging phase data is useful
1615 1616 remotephases = remote.listkeys('phases')
1616 1617 if not remotephases: # old server or public-only repo
1617 1618 phases.advanceboundary(self, 0, fut)
1618 1619 # don't push any phase data as there is nothing to push
1619 1620 else:
1620 1621 ana = phases.analyzeremotephases(self, fut, remotephases)
1621 1622 rheads, rroots = ana
1622 1623 ### Apply remote phase on local
1623 1624 if remotephases.get('publishing', False):
1624 1625 phases.advanceboundary(self, 0, fut)
1625 1626 else: # publish = False
1626 1627 for phase, rpheads in enumerate(rheads):
1627 1628 phases.advanceboundary(self, phase, rpheads)
1628 1629 ### Apply local phase on remote
1629 1630 #
1630 1631 # XXX If push failed we should use strict common and not
1631 1632 # future to avoid pushing phase data on unknown changesets.
1632 1633 # This is to be done later.
1633 1634 futctx = [self[n] for n in fut if n != nullid]
1634 1635 for phase in phases.trackedphases[::-1]:
1635 1636 prevphase = phase - 1
1636 1637 # get all candidates for heads in the previous phase
1637 1638 inprev = [ctx for ctx in futctx
1638 1639 if ctx.phase() == prevphase]
1639 1640 for newremotehead in self.set('heads(%ld & (%ln::))',
1640 1641 inprev, rroots[phase]):
1641 1642 r = remote.pushkey('phases',
1642 1643 newremotehead.hex(),
1643 1644 str(phase), str(prevphase))
1644 1645 if not r:
1645 1646 self.ui.warn(_('updating phase of %s '
1646 1647 'to %s failed!\n')
1647 1648 % (newremotehead, prevphase))
1648 1649 finally:
1649 1650 locallock.release()
1650 1651 finally:
1651 1652 if lock is not None:
1652 1653 lock.release()
1653 1654
1654 1655 self.ui.debug("checking for updated bookmarks\n")
1655 1656 rb = remote.listkeys('bookmarks')
1656 1657 for k in rb.keys():
1657 1658 if k in self._bookmarks:
1658 1659 nr, nl = rb[k], hex(self._bookmarks[k])
1659 1660 if nr in self:
1660 1661 cr = self[nr]
1661 1662 cl = self[nl]
1662 1663 if cl in cr.descendants():
1663 1664 r = remote.pushkey('bookmarks', k, nr, nl)
1664 1665 if r:
1665 1666 self.ui.status(_("updating bookmark %s\n") % k)
1666 1667 else:
1667 1668 self.ui.warn(_('updating bookmark %s'
1668 1669 ' failed!\n') % k)
1669 1670
1670 1671 return ret
1671 1672
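Aside: the integer contract in the docstring above can be decoded mechanically; a pure-Python sketch (helper name mine):

# Illustration only: decode push()'s return value per the docstring above.
def describepush(ret):
    if ret == 0:
        return 'HTTP error, or nothing to push'
    if ret == 1:
        return 'pushed; remote head count unchanged (or push was refused)'
    if ret > 1:
        return 'pushed; %d head(s) added on the remote' % (ret - 1)
    return 'pushed; %d head(s) removed on the remote' % (-1 - ret)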
1672 1673 def changegroupinfo(self, nodes, source):
1673 1674 if self.ui.verbose or source == 'bundle':
1674 1675 self.ui.status(_("%d changesets found\n") % len(nodes))
1675 1676 if self.ui.debugflag:
1676 1677 self.ui.debug("list of changesets:\n")
1677 1678 for node in nodes:
1678 1679 self.ui.debug("%s\n" % hex(node))
1679 1680
1680 1681 def changegroupsubset(self, bases, heads, source):
1681 1682 """Compute a changegroup consisting of all the nodes that are
1682 1683 descendants of any of the bases and ancestors of any of the heads.
1683 1684 Return a chunkbuffer object whose read() method will return
1684 1685 successive changegroup chunks.
1685 1686
1686 1687 It is fairly complex as determining which filenodes and which
1687 1688 manifest nodes need to be included for the changeset to be complete
1688 1689 is non-trivial.
1689 1690
1690 1691 Another wrinkle is doing the reverse, figuring out which changeset in
1691 1692 the changegroup a particular filenode or manifestnode belongs to.
1692 1693 """
1693 1694 cl = self.changelog
1694 1695 if not bases:
1695 1696 bases = [nullid]
1696 1697 csets, bases, heads = cl.nodesbetween(bases, heads)
1697 1698 # We assume that all ancestors of bases are known
1698 1699 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1699 1700 return self._changegroupsubset(common, csets, heads, source)
1700 1701
1701 1702 def getbundle(self, source, heads=None, common=None):
1702 1703 """Like changegroupsubset, but returns the set difference between the
1703 1704 ancestors of heads and the ancestors of common.
1704 1705
1705 1706 If heads is None, use the local heads. If common is None, use [nullid].
1706 1707
1707 1708 The nodes in common might not all be known locally due to the way the
1708 1709 current discovery protocol works.
1709 1710 """
1710 1711 cl = self.changelog
1711 1712 if common:
1712 1713 nm = cl.nodemap
1713 1714 common = [n for n in common if n in nm]
1714 1715 else:
1715 1716 common = [nullid]
1716 1717 if not heads:
1717 1718 heads = cl.heads()
1718 1719 common, missing = cl.findcommonmissing(common, heads)
1719 1720 if not missing:
1720 1721 return None
1721 1722 return self._changegroupsubset(common, missing, heads, source)
1722 1723
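Aside: the set difference described above is easy to model on a toy DAG; a pure-Python sketch for intuition only (real node graphs live in the changelog):

# Illustration only: nodes to send are ancestors(heads) - ancestors(common).
parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}

def ancestors(nodes):
    seen, stack = set(), list(nodes)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

print(ancestors(['c', 'd']) - ancestors(['b']))  # set(['c', 'd'])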
1723 1724 def _changegroupsubset(self, commonrevs, csets, heads, source):
1724 1725
1725 1726 cl = self.changelog
1726 1727 mf = self.manifest
1727 1728 mfs = {} # needed manifests
1728 1729 fnodes = {} # needed file nodes
1729 1730 changedfiles = set()
1730 1731 fstate = ['', {}]
1731 1732 count = [0]
1732 1733
1733 1734 # can we go through the fast path?
1734 1735 heads.sort()
1735 1736 if heads == sorted(self.heads()):
1736 1737 return self._changegroup(csets, source)
1737 1738
1738 1739 # slow path
1739 1740 self.hook('preoutgoing', throw=True, source=source)
1740 1741 self.changegroupinfo(csets, source)
1741 1742
1742 1743 # filter any nodes that claim to be part of the known set
1743 1744 def prune(revlog, missing):
1744 1745 return [n for n in missing
1745 1746 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1746 1747
1747 1748 def lookup(revlog, x):
1748 1749 if revlog == cl:
1749 1750 c = cl.read(x)
1750 1751 changedfiles.update(c[3])
1751 1752 mfs.setdefault(c[0], x)
1752 1753 count[0] += 1
1753 1754 self.ui.progress(_('bundling'), count[0],
1754 1755 unit=_('changesets'), total=len(csets))
1755 1756 return x
1756 1757 elif revlog == mf:
1757 1758 clnode = mfs[x]
1758 1759 mdata = mf.readfast(x)
1759 1760 for f in changedfiles:
1760 1761 if f in mdata:
1761 1762 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1762 1763 count[0] += 1
1763 1764 self.ui.progress(_('bundling'), count[0],
1764 1765 unit=_('manifests'), total=len(mfs))
1765 1766 return mfs[x]
1766 1767 else:
1767 1768 self.ui.progress(
1768 1769 _('bundling'), count[0], item=fstate[0],
1769 1770 unit=_('files'), total=len(changedfiles))
1770 1771 return fstate[1][x]
1771 1772
1772 1773 bundler = changegroup.bundle10(lookup)
1773 1774 reorder = self.ui.config('bundle', 'reorder', 'auto')
1774 1775 if reorder == 'auto':
1775 1776 reorder = None
1776 1777 else:
1777 1778 reorder = util.parsebool(reorder)
1778 1779
1779 1780 def gengroup():
1780 1781 # Create a changenode group generator that will call our functions
1781 1782 # back to lookup the owning changenode and collect information.
1782 1783 for chunk in cl.group(csets, bundler, reorder=reorder):
1783 1784 yield chunk
1784 1785 self.ui.progress(_('bundling'), None)
1785 1786
1786 1787 # Create a generator for the manifestnodes that calls our lookup
1787 1788 # and data collection functions back.
1788 1789 count[0] = 0
1789 1790 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1790 1791 yield chunk
1791 1792 self.ui.progress(_('bundling'), None)
1792 1793
1793 1794 mfs.clear()
1794 1795
1795 1796 # Go through all our files in order sorted by name.
1796 1797 count[0] = 0
1797 1798 for fname in sorted(changedfiles):
1798 1799 filerevlog = self.file(fname)
1799 1800 if not len(filerevlog):
1800 1801 raise util.Abort(_("empty or missing revlog for %s") % fname)
1801 1802 fstate[0] = fname
1802 1803 fstate[1] = fnodes.pop(fname, {})
1803 1804
1804 1805 nodelist = prune(filerevlog, fstate[1])
1805 1806 if nodelist:
1806 1807 count[0] += 1
1807 1808 yield bundler.fileheader(fname)
1808 1809 for chunk in filerevlog.group(nodelist, bundler, reorder):
1809 1810 yield chunk
1810 1811
1811 1812 # Signal that no more groups are left.
1812 1813 yield bundler.close()
1813 1814 self.ui.progress(_('bundling'), None)
1814 1815
1815 1816 if csets:
1816 1817 self.hook('outgoing', node=hex(csets[0]), source=source)
1817 1818
1818 1819 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1819 1820
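Aside: the bundle.reorder handling above is a tri-state; a rough pure-Python approximation of util.parsebool's role in it (the boolean table is abridged, so verify against util.py):

# Illustration only: 'auto' -> None lets the revlog decide; any other
# value is read as a boolean, or None if unparseable.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True,
             '0': False, 'no': False, 'false': False, 'off': False}

def parsereorder(value):
    if value == 'auto':
        return None
    return _booleans.get(value.lower())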
1820 1821 def changegroup(self, basenodes, source):
1821 1822 # to avoid a race we use changegroupsubset() (issue1320)
1822 1823 return self.changegroupsubset(basenodes, self.heads(), source)
1823 1824
1824 1825 def _changegroup(self, nodes, source):
1825 1826 """Compute the changegroup of all nodes that we have that a recipient
1826 1827 doesn't. Return a chunkbuffer object whose read() method will return
1827 1828 successive changegroup chunks.
1828 1829
1829 1830 This is much easier than the previous function as we can assume that
1830 1831 the recipient has any changenode we aren't sending them.
1831 1832
1832 1833 nodes is the set of nodes to send"""
1833 1834
1834 1835 cl = self.changelog
1835 1836 mf = self.manifest
1836 1837 mfs = {}
1837 1838 changedfiles = set()
1838 1839 fstate = ['']
1839 1840 count = [0]
1840 1841
1841 1842 self.hook('preoutgoing', throw=True, source=source)
1842 1843 self.changegroupinfo(nodes, source)
1843 1844
1844 1845 revset = set([cl.rev(n) for n in nodes])
1845 1846
1846 1847 def gennodelst(log):
1847 1848 return [log.node(r) for r in log if log.linkrev(r) in revset]
1848 1849
1849 1850 def lookup(revlog, x):
1850 1851 if revlog == cl:
1851 1852 c = cl.read(x)
1852 1853 changedfiles.update(c[3])
1853 1854 mfs.setdefault(c[0], x)
1854 1855 count[0] += 1
1855 1856 self.ui.progress(_('bundling'), count[0],
1856 1857 unit=_('changesets'), total=len(nodes))
1857 1858 return x
1858 1859 elif revlog == mf:
1859 1860 count[0] += 1
1860 1861 self.ui.progress(_('bundling'), count[0],
1861 1862 unit=_('manifests'), total=len(mfs))
1862 1863 return cl.node(revlog.linkrev(revlog.rev(x)))
1863 1864 else:
1864 1865 self.ui.progress(
1865 1866 _('bundling'), count[0], item=fstate[0],
1866 1867 total=len(changedfiles), unit=_('files'))
1867 1868 return cl.node(revlog.linkrev(revlog.rev(x)))
1868 1869
1869 1870 bundler = changegroup.bundle10(lookup)
1870 1871 reorder = self.ui.config('bundle', 'reorder', 'auto')
1871 1872 if reorder == 'auto':
1872 1873 reorder = None
1873 1874 else:
1874 1875 reorder = util.parsebool(reorder)
1875 1876
1876 1877 def gengroup():
1877 1878 '''yield a sequence of changegroup chunks (strings)'''
1878 1879 # construct a list of all changed files
1879 1880
1880 1881 for chunk in cl.group(nodes, bundler, reorder=reorder):
1881 1882 yield chunk
1882 1883 self.ui.progress(_('bundling'), None)
1883 1884
1884 1885 count[0] = 0
1885 1886 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1886 1887 yield chunk
1887 1888 self.ui.progress(_('bundling'), None)
1888 1889
1889 1890 count[0] = 0
1890 1891 for fname in sorted(changedfiles):
1891 1892 filerevlog = self.file(fname)
1892 1893 if not len(filerevlog):
1893 1894 raise util.Abort(_("empty or missing revlog for %s") % fname)
1894 1895 fstate[0] = fname
1895 1896 nodelist = gennodelst(filerevlog)
1896 1897 if nodelist:
1897 1898 count[0] += 1
1898 1899 yield bundler.fileheader(fname)
1899 1900 for chunk in filerevlog.group(nodelist, bundler, reorder):
1900 1901 yield chunk
1901 1902 yield bundler.close()
1902 1903 self.ui.progress(_('bundling'), None)
1903 1904
1904 1905 if nodes:
1905 1906 self.hook('outgoing', node=hex(nodes[0]), source=source)
1906 1907
1907 1908 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1908 1909
1909 1910 def addchangegroup(self, source, srctype, url, emptyok=False):
1910 1911 """Add the changegroup returned by source.read() to this repo.
1911 1912 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1912 1913 the URL of the repo where this changegroup is coming from.
1913 1914
1914 1915 Return an integer summarizing the change to this repo:
1915 1916 - nothing changed or no source: 0
1916 1917 - more heads than before: 1+added heads (2..n)
1917 1918 - fewer heads than before: -1-removed heads (-2..-n)
1918 1919 - number of heads stays the same: 1
1919 1920 """
1920 1921 def csmap(x):
1921 1922 self.ui.debug("add changeset %s\n" % short(x))
1922 1923 return len(cl)
1923 1924
1924 1925 def revmap(x):
1925 1926 return cl.rev(x)
1926 1927
1927 1928 if not source:
1928 1929 return 0
1929 1930
1930 1931 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1931 1932
1932 1933 changesets = files = revisions = 0
1933 1934 efiles = set()
1934 1935
1935 1936 # write changelog data to temp files so concurrent readers will not see
1936 1937 # inconsistent view
1937 1938 cl = self.changelog
1938 1939 cl.delayupdate()
1939 1940 oldheads = cl.heads()
1940 1941
1941 1942 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1942 1943 try:
1943 1944 trp = weakref.proxy(tr)
1944 1945 # pull off the changeset group
1945 1946 self.ui.status(_("adding changesets\n"))
1946 1947 clstart = len(cl)
1947 1948 class prog(object):
1948 1949 step = _('changesets')
1949 1950 count = 1
1950 1951 ui = self.ui
1951 1952 total = None
1952 1953 def __call__(self):
1953 1954 self.ui.progress(self.step, self.count, unit=_('chunks'),
1954 1955 total=self.total)
1955 1956 self.count += 1
1956 1957 pr = prog()
1957 1958 source.callback = pr
1958 1959
1959 1960 source.changelogheader()
1960 1961 if (cl.addgroup(source, csmap, trp) is None
1961 1962 and not emptyok):
1962 1963 raise util.Abort(_("received changelog group is empty"))
1963 1964 clend = len(cl)
1964 1965 changesets = clend - clstart
1965 1966 for c in xrange(clstart, clend):
1966 1967 efiles.update(self[c].files())
1967 1968 efiles = len(efiles)
1968 1969 self.ui.progress(_('changesets'), None)
1969 1970
1970 1971 # pull off the manifest group
1971 1972 self.ui.status(_("adding manifests\n"))
1972 1973 pr.step = _('manifests')
1973 1974 pr.count = 1
1974 1975 pr.total = changesets # manifests <= changesets
1975 1976 # no need to check for empty manifest group here:
1976 1977 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1977 1978 # no new manifest will be created and the manifest group will
1978 1979 # be empty during the pull
1979 1980 source.manifestheader()
1980 1981 self.manifest.addgroup(source, revmap, trp)
1981 1982 self.ui.progress(_('manifests'), None)
1982 1983
1983 1984 needfiles = {}
1984 1985 if self.ui.configbool('server', 'validate', default=False):
1985 1986 # validate incoming csets have their manifests
1986 1987 for cset in xrange(clstart, clend):
1987 1988 mfest = self.changelog.read(self.changelog.node(cset))[0]
1988 1989 mfest = self.manifest.readdelta(mfest)
1989 1990 # store file nodes we must see
1990 1991 for f, n in mfest.iteritems():
1991 1992 needfiles.setdefault(f, set()).add(n)
1992 1993
1993 1994 # process the files
1994 1995 self.ui.status(_("adding file changes\n"))
1995 1996 pr.step = _('files')
1996 1997 pr.count = 1
1997 1998 pr.total = efiles
1998 1999 source.callback = None
1999 2000
2000 2001 while True:
2001 2002 chunkdata = source.filelogheader()
2002 2003 if not chunkdata:
2003 2004 break
2004 2005 f = chunkdata["filename"]
2005 2006 self.ui.debug("adding %s revisions\n" % f)
2006 2007 pr()
2007 2008 fl = self.file(f)
2008 2009 o = len(fl)
2009 2010 if fl.addgroup(source, revmap, trp) is None:
2010 2011 raise util.Abort(_("received file revlog group is empty"))
2011 2012 revisions += len(fl) - o
2012 2013 files += 1
2013 2014 if f in needfiles:
2014 2015 needs = needfiles[f]
2015 2016 for new in xrange(o, len(fl)):
2016 2017 n = fl.node(new)
2017 2018 if n in needs:
2018 2019 needs.remove(n)
2019 2020 if not needs:
2020 2021 del needfiles[f]
2021 2022 self.ui.progress(_('files'), None)
2022 2023
2023 2024 for f, needs in needfiles.iteritems():
2024 2025 fl = self.file(f)
2025 2026 for n in needs:
2026 2027 try:
2027 2028 fl.rev(n)
2028 2029 except error.LookupError:
2029 2030 raise util.Abort(
2030 2031 _('missing file data for %s:%s - run hg verify') %
2031 2032 (f, hex(n)))
2032 2033
2033 2034 dh = 0
2034 2035 if oldheads:
2035 2036 heads = cl.heads()
2036 2037 dh = len(heads) - len(oldheads)
2037 2038 for h in heads:
2038 2039 if h not in oldheads and 'close' in self[h].extra():
2039 2040 dh -= 1
2040 2041 htext = ""
2041 2042 if dh:
2042 2043 htext = _(" (%+d heads)") % dh
2043 2044
2044 2045 self.ui.status(_("added %d changesets"
2045 2046 " with %d changes to %d files%s\n")
2046 2047 % (changesets, revisions, files, htext))
2047 2048
2048 2049 if changesets > 0:
2049 2050 p = lambda: cl.writepending() and self.root or ""
2050 2051 self.hook('pretxnchangegroup', throw=True,
2051 2052 node=hex(cl.node(clstart)), source=srctype,
2052 2053 url=url, pending=p)
2053 2054
2054 2055 added = [cl.node(r) for r in xrange(clstart, clend)]
2055 2056 publishing = self.ui.configbool('phases', 'publish', True)
2056 2057 if publishing and srctype == 'push':
2057 2058 # Old servers cannot push the boundary themselves.
2058 2059 # This clause ensures pushed changesets are always marked as public
2059 2060 phases.advanceboundary(self, 0, added)
2060 2061 elif srctype != 'strip': # strip should not touch boundary at all
2061 2062 phases.retractboundary(self, 1, added)
2062 2063
2063 2064 # make changelog see real files again
2064 2065 cl.finalize(trp)
2065 2066
2066 2067 tr.close()
2067 2068
2068 2069 if changesets > 0:
2069 2070 def runhooks():
2070 2071 # forcefully update the on-disk branch cache
2071 2072 self.ui.debug("updating the branch cache\n")
2072 2073 self.updatebranchcache()
2073 2074 self.hook("changegroup", node=hex(cl.node(clstart)),
2074 2075 source=srctype, url=url)
2075 2076
2076 2077 for n in added:
2077 2078 self.hook("incoming", node=hex(n), source=srctype,
2078 2079 url=url)
2079 2080 self._afterlock(runhooks)
2080 2081
2081 2082 finally:
2082 2083 tr.release()
2083 2084 # never return 0 here:
2084 2085 if dh < 0:
2085 2086 return dh - 1
2086 2087 else:
2087 2088 return dh + 1
2088 2089
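Aside: the head bookkeeping above in miniature; new heads whose changeset extra carries a 'close' marker do not count toward the delta:

# Illustration only (pure Python).
def headdelta(oldheads, newheads, closed):
    dh = len(newheads) - len(oldheads)
    for h in newheads:
        if h not in oldheads and h in closed:
            dh -= 1
    return dh

print(headdelta(['a'], ['a', 'b', 'c'], closed=set(['c'])))  # prints 1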
2089 2090 def stream_in(self, remote, requirements):
2090 2091 lock = self.lock()
2091 2092 try:
2092 2093 fp = remote.stream_out()
2093 2094 l = fp.readline()
2094 2095 try:
2095 2096 resp = int(l)
2096 2097 except ValueError:
2097 2098 raise error.ResponseError(
2098 2099 _('Unexpected response from remote server:'), l)
2099 2100 if resp == 1:
2100 2101 raise util.Abort(_('operation forbidden by server'))
2101 2102 elif resp == 2:
2102 2103 raise util.Abort(_('locking the remote repository failed'))
2103 2104 elif resp != 0:
2104 2105 raise util.Abort(_('the server sent an unknown error code'))
2105 2106 self.ui.status(_('streaming all changes\n'))
2106 2107 l = fp.readline()
2107 2108 try:
2108 2109 total_files, total_bytes = map(int, l.split(' ', 1))
2109 2110 except (ValueError, TypeError):
2110 2111 raise error.ResponseError(
2111 2112 _('Unexpected response from remote server:'), l)
2112 2113 self.ui.status(_('%d files to transfer, %s of data\n') %
2113 2114 (total_files, util.bytecount(total_bytes)))
2114 2115 start = time.time()
2115 2116 for i in xrange(total_files):
2116 2117 # XXX doesn't support '\n' or '\r' in filenames
2117 2118 l = fp.readline()
2118 2119 try:
2119 2120 name, size = l.split('\0', 1)
2120 2121 size = int(size)
2121 2122 except (ValueError, TypeError):
2122 2123 raise error.ResponseError(
2123 2124 _('Unexpected response from remote server:'), l)
2124 2125 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2125 2126 # for backwards compat, name was partially encoded
2126 2127 ofp = self.sopener(store.decodedir(name), 'w')
2127 2128 for chunk in util.filechunkiter(fp, limit=size):
2128 2129 ofp.write(chunk)
2129 2130 ofp.close()
2130 2131 elapsed = time.time() - start
2131 2132 if elapsed <= 0:
2132 2133 elapsed = 0.001
2133 2134 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2134 2135 (util.bytecount(total_bytes), elapsed,
2135 2136 util.bytecount(total_bytes / elapsed)))
2136 2137
2137 2138 # new requirements = old non-format requirements + new format-related
2138 2139 # requirements from the streamed-in repository
2139 2140 requirements.update(set(self.requirements) - self.supportedformats)
2140 2141 self._applyrequirements(requirements)
2141 2142 self._writerequirements()
2142 2143
2143 2144 self.invalidate()
2144 2145 return len(self.heads()) + 1
2145 2146 finally:
2146 2147 lock.release()
2147 2148
2148 2149 def clone(self, remote, heads=[], stream=False):
2149 2150 '''clone remote repository.
2150 2151
2151 2152 keyword arguments:
2152 2153 heads: list of revs to clone (forces use of pull)
2153 2154 stream: use streaming clone if possible'''
2154 2155
2155 2156 # now, all clients that can request uncompressed clones can
2156 2157 # read repo formats supported by all servers that can serve
2157 2158 # them.
2158 2159
2159 2160 # if revlog format changes, client will have to check version
2160 2161 # and format flags on "stream" capability, and use
2161 2162 # uncompressed only if compatible.
2162 2163
2163 2164 if stream and not heads:
2164 2165 # 'stream' means remote revlog format is revlogv1 only
2165 2166 if remote.capable('stream'):
2166 2167 return self.stream_in(remote, set(('revlogv1',)))
2167 2168 # otherwise, 'streamreqs' contains the remote revlog format
2168 2169 streamreqs = remote.capable('streamreqs')
2169 2170 if streamreqs:
2170 2171 streamreqs = set(streamreqs.split(','))
2171 2172 # if we support it, stream in and adjust our requirements
2172 2173 if not streamreqs - self.supportedformats:
2173 2174 return self.stream_in(remote, streamreqs)
2174 2175 return self.pull(remote, heads)
2175 2176
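Aside: the streaming-compatibility test above is plain set arithmetic; a sketch using format names that appear in this file:

# Illustration only: stream only when the remote's requirements are a
# subset of the formats we support.
supportedformats = set(['revlogv1', 'generaldelta'])

def canstream(streamreqs):
    return not (set(streamreqs.split(',')) - supportedformats)

print(canstream('revlogv1'))               # True
print(canstream('revlogv1,futureformat'))  # False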
2176 2177 def pushkey(self, namespace, key, old, new):
2177 2178 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2178 2179 old=old, new=new)
2179 2180 ret = pushkey.push(self, namespace, key, old, new)
2180 2181 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2181 2182 ret=ret)
2182 2183 return ret
2183 2184
2184 2185 def listkeys(self, namespace):
2185 2186 self.hook('prelistkeys', throw=True, namespace=namespace)
2186 2187 values = pushkey.list(self, namespace)
2187 2188 self.hook('listkeys', namespace=namespace, values=values)
2188 2189 return values
2189 2190
2190 2191 def debugwireargs(self, one, two, three=None, four=None, five=None):
2191 2192 '''used to test argument passing over the wire'''
2192 2193 return "%s %s %s %s %s" % (one, two, three, four, five)
2193 2194
2194 2195 def savecommitmessage(self, text):
2195 2196 fp = self.opener('last-message.txt', 'wb')
2196 2197 try:
2197 2198 fp.write(text)
2198 2199 finally:
2199 2200 fp.close()
2200 2201 return self.pathto(fp.name[len(self.root)+1:])
2201 2202
2202 2203 # used to avoid circular references so destructors work
2203 2204 def aftertrans(files):
2204 2205 renamefiles = [tuple(t) for t in files]
2205 2206 def a():
2206 2207 for src, dest in renamefiles:
2207 2208 util.rename(src, dest)
2208 2209 return a
2209 2210
2210 2211 def undoname(fn):
2211 2212 base, name = os.path.split(fn)
2212 2213 assert name.startswith('journal')
2213 2214 return os.path.join(base, name.replace('journal', 'undo', 1))
2214 2215
2215 2216 def instance(ui, path, create):
2216 2217 return localrepository(ui, util.urllocalpath(path), create)
2217 2218
2218 2219 def islocal(path):
2219 2220 return True
@@ -1,813 +1,814 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
13 13 def checkfilename(f):
14 14 '''Check that the filename f is an acceptable filename for a tracked file'''
15 15 if '\r' in f or '\n' in f:
16 16 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
17 17
18 18 def checkportable(ui, f):
19 19 '''Check if filename f is portable and warn or abort depending on config'''
20 20 checkfilename(f)
21 21 abort, warn = checkportabilityalert(ui)
22 22 if abort or warn:
23 23 msg = util.checkwinfilename(f)
24 24 if msg:
25 25 msg = "%s: %r" % (msg, f)
26 26 if abort:
27 27 raise util.Abort(msg)
28 28 ui.warn(_("warning: %s\n") % msg)
29 29
30 30 def checkportabilityalert(ui):
31 31 '''check if the user's config requests nothing, a warning, or abort for
32 32 non-portable filenames'''
33 33 val = ui.config('ui', 'portablefilenames', 'warn')
34 34 lval = val.lower()
35 35 bval = util.parsebool(val)
36 36 abort = os.name == 'nt' or lval == 'abort'
37 37 warn = bval or lval == 'warn'
38 38 if bval is None and not (warn or abort or lval == 'ignore'):
39 39 raise error.ConfigError(
40 40 _("ui.portablefilenames value is invalid ('%s')") % val)
41 41 return abort, warn
42 42
43 43 class casecollisionauditor(object):
44 44 def __init__(self, ui, abort, existingiter):
45 45 self._ui = ui
46 46 self._abort = abort
47 47 self._map = {}
48 48 for f in existingiter:
49 49 self._map[encoding.lower(f)] = f
50 50
51 51 def __call__(self, f):
52 52 fl = encoding.lower(f)
53 53 map = self._map
54 54 if fl in map and map[fl] != f:
55 55 msg = _('possible case-folding collision for %s') % f
56 56 if self._abort:
57 57 raise util.Abort(msg)
58 58 self._ui.warn(_("warning: %s\n") % msg)
59 59 map[fl] = f
60 60
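Aside: the collision check in miniature, with str.lower standing in for encoding.lower (which is charset-aware), so this is a simplification:

# Illustration only (pure Python).
seen = {}

def check(f):
    fl = f.lower()
    if fl in seen and seen[fl] != f:
        raise ValueError('possible case-folding collision for %s' % f)
    seen[fl] = f

check('README')
check('readme')  # raises: collides with 'README' on a case-folding filesystem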
61 61 class pathauditor(object):
62 62 '''ensure that a filesystem path contains no banned components.
63 63 the following properties of a path are checked:
64 64
65 65 - ends with a directory separator
66 66 - under top-level .hg
67 67 - starts at the root of a windows drive
68 68 - contains ".."
69 69 - traverses a symlink (e.g. a/symlink_here/b)
70 70 - inside a nested repository (a callback can be used to approve
71 71 some nested repositories, e.g., subrepositories)
72 72 '''
73 73
74 74 def __init__(self, root, callback=None):
75 75 self.audited = set()
76 76 self.auditeddir = set()
77 77 self.root = root
78 78 self.callback = callback
79 79 if os.path.lexists(root) and not util.checkcase(root):
80 80 self.normcase = util.normcase
81 81 else:
82 82 self.normcase = lambda x: x
83 83
84 84 def __call__(self, path):
85 85 '''Check the relative path.
86 86 path may contain a pattern (e.g. foodir/**.txt)'''
87 87
88 path = util.localpath(path)
88 89 normpath = self.normcase(path)
89 90 if normpath in self.audited:
90 91 return
91 92 # AIX ignores "/" at end of path, others raise EISDIR.
92 93 if util.endswithsep(path):
93 94 raise util.Abort(_("path ends in directory separator: %s") % path)
94 95 parts = util.splitpath(path)
95 96 if (os.path.splitdrive(path)[0]
96 97 or parts[0].lower() in ('.hg', '.hg.', '')
97 98 or os.pardir in parts):
98 99 raise util.Abort(_("path contains illegal component: %s") % path)
99 100 if '.hg' in path.lower():
100 101 lparts = [p.lower() for p in parts]
101 102 for p in '.hg', '.hg.':
102 103 if p in lparts[1:]:
103 104 pos = lparts.index(p)
104 105 base = os.path.join(*parts[:pos])
105 106 raise util.Abort(_("path '%s' is inside nested repo %r")
106 107 % (path, base))
107 108
108 109 normparts = util.splitpath(normpath)
109 110 assert len(parts) == len(normparts)
110 111
111 112 parts.pop()
112 113 normparts.pop()
113 114 prefixes = []
114 115 while parts:
115 116 prefix = os.sep.join(parts)
116 117 normprefix = os.sep.join(normparts)
117 118 if normprefix in self.auditeddir:
118 119 break
119 120 curpath = os.path.join(self.root, prefix)
120 121 try:
121 122 st = os.lstat(curpath)
122 123 except OSError, err:
123 124 # EINVAL can be raised as invalid path syntax under win32.
124 125 # They must be ignored so that patterns can still be checked.
125 126 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
126 127 raise
127 128 else:
128 129 if stat.S_ISLNK(st.st_mode):
129 130 raise util.Abort(
130 131 _('path %r traverses symbolic link %r')
131 132 % (path, prefix))
132 133 elif (stat.S_ISDIR(st.st_mode) and
133 134 os.path.isdir(os.path.join(curpath, '.hg'))):
134 135 if not self.callback or not self.callback(curpath):
135 136 raise util.Abort(_("path '%s' is inside nested repo %r") %
136 137 (path, prefix))
137 138 prefixes.append(normprefix)
138 139 parts.pop()
139 140 normparts.pop()
140 141
141 142 self.audited.add(normpath)
142 143 # only add prefixes to the cache after checking everything: we don't
143 144 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
144 145 self.auditeddir.update(prefixes)
145 146
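Aside: a few concrete paths the auditor accepts or rejects; a sketch assuming a (possibly nonexistent) root at /repo and a Python 2 Mercurial on the import path:

# Illustration only.
from mercurial import scmutil, util

auditor = scmutil.pathauditor('/repo')
for p in ('ok/file.txt', 'foo/', '.hg/hgrc', 'a/../b'):
    try:
        auditor(p)
        print('%s: ok' % p)
    except util.Abort, e:
        print('%s: rejected (%s)' % (p, e))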
146 147 class abstractopener(object):
147 148 """Abstract base class; cannot be instantiated"""
148 149
149 150 def __init__(self, *args, **kwargs):
150 151 '''Prevent instantiation; don't call this from subclasses.'''
151 152 raise NotImplementedError('attempted instantiating ' + str(type(self)))
152 153
153 154 def read(self, path):
154 155 fp = self(path, 'rb')
155 156 try:
156 157 return fp.read()
157 158 finally:
158 159 fp.close()
159 160
160 161 def write(self, path, data):
161 162 fp = self(path, 'wb')
162 163 try:
163 164 return fp.write(data)
164 165 finally:
165 166 fp.close()
166 167
167 168 def append(self, path, data):
168 169 fp = self(path, 'ab')
169 170 try:
170 171 return fp.write(data)
171 172 finally:
172 173 fp.close()
173 174
174 175 class opener(abstractopener):
175 176 '''Open files relative to a base directory
176 177
177 178 This class is used to hide the details of COW semantics and
178 179 remote file access from higher level code.
179 180 '''
180 181 def __init__(self, base, audit=True):
181 182 self.base = base
182 183 self._audit = audit
183 184 if audit:
184 185 self.auditor = pathauditor(base)
185 186 else:
186 187 self.auditor = util.always
187 188 self.createmode = None
188 189 self._trustnlink = None
189 190
190 191 @util.propertycache
191 192 def _cansymlink(self):
192 193 return util.checklink(self.base)
193 194
194 195 def _fixfilemode(self, name):
195 196 if self.createmode is None:
196 197 return
197 198 os.chmod(name, self.createmode & 0666)
198 199
199 200 def __call__(self, path, mode="r", text=False, atomictemp=False):
200 201 if self._audit:
201 202 r = util.checkosfilename(path)
202 203 if r:
203 204 raise util.Abort("%s: %r" % (r, path))
204 205 self.auditor(path)
205 206 f = os.path.join(self.base, path)
206 207
207 208 if not text and "b" not in mode:
208 209 mode += "b" # for that other OS
209 210
210 211 nlink = -1
211 212 dirname, basename = os.path.split(f)
212 213 # If basename is empty, then the path is malformed because it points
213 214 # to a directory. Let the posixfile() call below raise IOError.
214 215 if basename and mode not in ('r', 'rb'):
215 216 if atomictemp:
216 217 if not os.path.isdir(dirname):
217 218 util.makedirs(dirname, self.createmode)
218 219 return util.atomictempfile(f, mode, self.createmode)
219 220 try:
220 221 if 'w' in mode:
221 222 util.unlink(f)
222 223 nlink = 0
223 224 else:
224 225 # nlinks() may behave differently for files on Windows
225 226 # shares if the file is open.
226 227 fd = util.posixfile(f)
227 228 nlink = util.nlinks(f)
228 229 if nlink < 1:
229 230 nlink = 2 # force mktempcopy (issue1922)
230 231 fd.close()
231 232 except (OSError, IOError), e:
232 233 if e.errno != errno.ENOENT:
233 234 raise
234 235 nlink = 0
235 236 if not os.path.isdir(dirname):
236 237 util.makedirs(dirname, self.createmode)
237 238 if nlink > 0:
238 239 if self._trustnlink is None:
239 240 self._trustnlink = nlink > 1 or util.checknlink(f)
240 241 if nlink > 1 or not self._trustnlink:
241 242 util.rename(util.mktempcopy(f), f)
242 243 fp = util.posixfile(f, mode)
243 244 if nlink == 0:
244 245 self._fixfilemode(f)
245 246 return fp
246 247
247 248 def symlink(self, src, dst):
248 249 self.auditor(dst)
249 250 linkname = os.path.join(self.base, dst)
250 251 try:
251 252 os.unlink(linkname)
252 253 except OSError:
253 254 pass
254 255
255 256 dirname = os.path.dirname(linkname)
256 257 if not os.path.exists(dirname):
257 258 util.makedirs(dirname, self.createmode)
258 259
259 260 if self._cansymlink:
260 261 try:
261 262 os.symlink(src, linkname)
262 263 except OSError, err:
263 264 raise OSError(err.errno, _('could not symlink to %r: %s') %
264 265 (src, err.strerror), linkname)
265 266 else:
266 267 f = self(dst, "w")
267 268 f.write(src)
268 269 f.close()
269 270 self._fixfilemode(dst)
270 271
271 272 def audit(self, path):
272 273 self.auditor(path)
273 274
274 275 class filteropener(abstractopener):
275 276 '''Wrapper opener for filtering filenames with a function.'''
276 277
277 278 def __init__(self, opener, filter):
278 279 self._filter = filter
279 280 self._orig = opener
280 281
281 282 def __call__(self, path, *args, **kwargs):
282 283 return self._orig(self._filter(path), *args, **kwargs)
283 284
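Aside: filteropener in use; the prefixing filter below is hypothetical, not Mercurial's actual store encoding:

# Illustration only (Python 2).
import tempfile
from mercurial.scmutil import opener, filteropener

base = tempfile.mkdtemp()
fo = filteropener(opener(base), lambda p: 'data/' + p)
fo.write('x.i', 'payload')   # actually lands in <base>/data/x.i
print(fo.read('x.i'))        # 'payload'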
284 285 def canonpath(root, cwd, myname, auditor=None):
285 286 '''return the canonical path of myname, given cwd and root'''
286 287 if util.endswithsep(root):
287 288 rootsep = root
288 289 else:
289 290 rootsep = root + os.sep
290 291 name = myname
291 292 if not os.path.isabs(name):
292 293 name = os.path.join(root, cwd, name)
293 294 name = os.path.normpath(name)
294 295 if auditor is None:
295 296 auditor = pathauditor(root)
296 297 if name != rootsep and name.startswith(rootsep):
297 298 name = name[len(rootsep):]
298 299 auditor(name)
299 300 return util.pconvert(name)
300 301 elif name == root:
301 302 return ''
302 303 else:
303 304 # Determine whether `name' is in the hierarchy at or beneath `root',
304 305 # by iterating name=dirname(name) until that causes no change (can't
305 306 # check name == '/', because that doesn't work on windows). For each
306 307 # `name', compare dev/inode numbers. If they match, the list `rel'
307 308 # holds the reversed list of components making up the relative file
308 309 # name we want.
309 310 root_st = os.stat(root)
310 311 rel = []
311 312 while True:
312 313 try:
313 314 name_st = os.stat(name)
314 315 except OSError:
315 316 break
316 317 if util.samestat(name_st, root_st):
317 318 if not rel:
318 319 # name was actually the same as root (maybe a symlink)
319 320 return ''
320 321 rel.reverse()
321 322 name = os.path.join(*rel)
322 323 auditor(name)
323 324 return util.pconvert(name)
324 325 dirname, basename = os.path.split(name)
325 326 rel.append(basename)
326 327 if dirname == name:
327 328 break
328 329 name = dirname
329 330
330 331 raise util.Abort('%s not under root' % myname)
331 332
332 333 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
333 334 '''yield every hg repository under path, recursively.'''
334 335 def errhandler(err):
335 336 if err.filename == path:
336 337 raise err
337 338 samestat = getattr(os.path, 'samestat', None)
338 339 if followsym and samestat is not None:
339 340 def adddir(dirlst, dirname):
340 341 match = False
341 342 dirstat = os.stat(dirname)
342 343 for lstdirstat in dirlst:
343 344 if samestat(dirstat, lstdirstat):
344 345 match = True
345 346 break
346 347 if not match:
347 348 dirlst.append(dirstat)
348 349 return not match
349 350 else:
350 351 followsym = False
351 352
352 353 if (seen_dirs is None) and followsym:
353 354 seen_dirs = []
354 355 adddir(seen_dirs, path)
355 356 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
356 357 dirs.sort()
357 358 if '.hg' in dirs:
358 359 yield root # found a repository
359 360 qroot = os.path.join(root, '.hg', 'patches')
360 361 if os.path.isdir(os.path.join(qroot, '.hg')):
361 362 yield qroot # we have a patch queue repo here
362 363 if recurse:
363 364 # avoid recursing inside the .hg directory
364 365 dirs.remove('.hg')
365 366 else:
366 367 dirs[:] = [] # don't descend further
367 368 elif followsym:
368 369 newdirs = []
369 370 for d in dirs:
370 371 fname = os.path.join(root, d)
371 372 if adddir(seen_dirs, fname):
372 373 if os.path.islink(fname):
373 374 for hgname in walkrepos(fname, True, seen_dirs):
374 375 yield hgname
375 376 else:
376 377 newdirs.append(d)
377 378 dirs[:] = newdirs
378 379
379 380 def osrcpath():
380 381 '''return default os-specific hgrc search path'''
381 382 path = systemrcpath()
382 383 path.extend(userrcpath())
383 384 path = [os.path.normpath(f) for f in path]
384 385 return path
385 386
386 387 _rcpath = None
387 388
388 389 def rcpath():
389 390 '''return hgrc search path. if env var HGRCPATH is set, use it.
390 391 for each item in path, if directory, use files ending in .rc,
391 392 else use item.
392 393 make HGRCPATH empty to only look in .hg/hgrc of current repo.
393 394 if no HGRCPATH, use default os-specific path.'''
394 395 global _rcpath
395 396 if _rcpath is None:
396 397 if 'HGRCPATH' in os.environ:
397 398 _rcpath = []
398 399 for p in os.environ['HGRCPATH'].split(os.pathsep):
399 400 if not p:
400 401 continue
401 402 p = util.expandpath(p)
402 403 if os.path.isdir(p):
403 404 for f, kind in osutil.listdir(p):
404 405 if f.endswith('.rc'):
405 406 _rcpath.append(os.path.join(p, f))
406 407 else:
407 408 _rcpath.append(p)
408 409 else:
409 410 _rcpath = osrcpath()
410 411 return _rcpath
411 412
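Aside: because the result is cached in the module-level _rcpath, HGRCPATH must be set before the first rcpath() call in a process; the paths below are hypothetical:

# Illustration only.
import os
os.environ['HGRCPATH'] = os.pathsep.join(
    ['/etc/mercurial/site.rc',    # a file: used as-is
     '/etc/mercurial/conf.d'])    # a directory: its *.rc files are used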
412 413 if os.name != 'nt':
413 414
414 415 def rcfiles(path):
415 416 rcs = [os.path.join(path, 'hgrc')]
416 417 rcdir = os.path.join(path, 'hgrc.d')
417 418 try:
418 419 rcs.extend([os.path.join(rcdir, f)
419 420 for f, kind in osutil.listdir(rcdir)
420 421 if f.endswith(".rc")])
421 422 except OSError:
422 423 pass
423 424 return rcs
424 425
425 426 def systemrcpath():
426 427 path = []
427 428 # old mod_python does not set sys.argv
428 429 if len(getattr(sys, 'argv', [])) > 0:
429 430 p = os.path.dirname(os.path.dirname(sys.argv[0]))
430 431 path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
431 432 path.extend(rcfiles('/etc/mercurial'))
432 433 return path
433 434
434 435 def userrcpath():
435 436 return [os.path.expanduser('~/.hgrc')]
436 437
437 438 else:
438 439
439 440 _HKEY_LOCAL_MACHINE = 0x80000002L
440 441
441 442 def systemrcpath():
442 443 '''return default os-specific hgrc search path'''
443 444 rcpath = []
444 445 filename = util.executablepath()
445 446 # Use mercurial.ini found in directory with hg.exe
446 447 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
447 448 if os.path.isfile(progrc):
448 449 rcpath.append(progrc)
449 450 return rcpath
450 451 # Use hgrc.d found in directory with hg.exe
451 452 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
452 453 if os.path.isdir(progrcd):
453 454 for f, kind in osutil.listdir(progrcd):
454 455 if f.endswith('.rc'):
455 456 rcpath.append(os.path.join(progrcd, f))
456 457 return rcpath
457 458 # else look for a system rcpath in the registry
458 459 value = util.lookupreg('SOFTWARE\\Mercurial', None,
459 460 _HKEY_LOCAL_MACHINE)
460 461 if not isinstance(value, str) or not value:
461 462 return rcpath
462 463 value = value.replace('/', os.sep)
463 464 for p in value.split(os.pathsep):
464 465 if p.lower().endswith('mercurial.ini'):
465 466 rcpath.append(p)
466 467 elif os.path.isdir(p):
467 468 for f, kind in osutil.listdir(p):
468 469 if f.endswith('.rc'):
469 470 rcpath.append(os.path.join(p, f))
470 471 return rcpath
471 472
472 473 def userrcpath():
473 474 '''return os-specific hgrc search path to the user dir'''
474 475 home = os.path.expanduser('~')
475 476 path = [os.path.join(home, 'mercurial.ini'),
476 477 os.path.join(home, '.hgrc')]
477 478 userprofile = os.environ.get('USERPROFILE')
478 479 if userprofile:
479 480 path.append(os.path.join(userprofile, 'mercurial.ini'))
480 481 path.append(os.path.join(userprofile, '.hgrc'))
481 482 return path
482 483
483 484 def revsingle(repo, revspec, default='.'):
484 485 if not revspec:
485 486 return repo[default]
486 487
487 488 l = revrange(repo, [revspec])
488 489 if len(l) < 1:
489 490 raise util.Abort(_('empty revision set'))
490 491 return repo[l[-1]]
491 492
492 493 def revpair(repo, revs):
493 494 if not revs:
494 495 return repo.dirstate.p1(), None
495 496
496 497 l = revrange(repo, revs)
497 498
498 499 if len(l) == 0:
499 500 return repo.dirstate.p1(), None
500 501
501 502 if len(l) == 1:
502 503 return repo.lookup(l[0]), None
503 504
504 505 return repo.lookup(l[0]), repo.lookup(l[-1])
505 506
506 507 _revrangesep = ':'
507 508
508 509 def revrange(repo, revs):
509 510 """Return a list of revision numbers from a list of revision specifications."""
510 511
511 512 def revfix(repo, val, defval):
512 513 if not val and val != 0 and defval is not None:
513 514 return defval
514 515 return repo.changelog.rev(repo.lookup(val))
515 516
516 517 seen, l = set(), []
517 518 for spec in revs:
518 519 # attempt to parse old-style ranges first to deal with
519 520 # things like old-tag which contain query metacharacters
520 521 try:
521 522 if isinstance(spec, int):
522 523 seen.add(spec)
523 524 l.append(spec)
524 525 continue
525 526
526 527 if _revrangesep in spec:
527 528 start, end = spec.split(_revrangesep, 1)
528 529 start = revfix(repo, start, 0)
529 530 end = revfix(repo, end, len(repo) - 1)
530 531 step = start > end and -1 or 1
531 532 for rev in xrange(start, end + step, step):
532 533 if rev in seen:
533 534 continue
534 535 seen.add(rev)
535 536 l.append(rev)
536 537 continue
537 538 elif spec and spec in repo: # single unquoted rev
538 539 rev = revfix(repo, spec, None)
539 540 if rev in seen:
540 541 continue
541 542 seen.add(rev)
542 543 l.append(rev)
543 544 continue
544 545 except error.RepoLookupError:
545 546 pass
546 547
547 548 # fall through to new-style queries if old-style fails
548 549 m = revset.match(repo.ui, spec)
549 550 for r in m(repo, range(len(repo))):
550 551 if r not in seen:
551 552 l.append(r)
552 553 seen.update(l)
553 554
554 555 return l
555 556
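Aside: the spec forms revrange() accepts, sketched against a repository in the current directory with at least three revisions (the all() revset is assumed available in this era):

# Illustration only (Python 2).
from mercurial import ui as uimod, hg, scmutil

repo = hg.repository(uimod.ui(), '.')
print(scmutil.revrange(repo, ['0']))      # [0] - single rev
print(scmutil.revrange(repo, ['2:0']))    # [2, 1, 0] - old-style range
print(scmutil.revrange(repo, ['all()']))  # falls through to a revset query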
556 557 def expandpats(pats):
557 558 if not util.expandglobs:
558 559 return list(pats)
559 560 ret = []
560 561 for p in pats:
561 562 kind, name = matchmod._patsplit(p, None)
562 563 if kind is None:
563 564 try:
564 565 globbed = glob.glob(name)
565 566 except re.error:
566 567 globbed = [name]
567 568 if globbed:
568 569 ret.extend(globbed)
569 570 continue
570 571 ret.append(p)
571 572 return ret
572 573
573 574 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
574 575 if pats == ("",):
575 576 pats = []
576 577 if not globbed and default == 'relpath':
577 578 pats = expandpats(pats or [])
578 579
579 580 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
580 581 default)
581 582 def badfn(f, msg):
582 583 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
583 584 m.bad = badfn
584 585 return m
585 586
586 587 def matchall(repo):
587 588 return matchmod.always(repo.root, repo.getcwd())
588 589
589 590 def matchfiles(repo, files):
590 591 return matchmod.exact(repo.root, repo.getcwd(), files)
591 592
592 593 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
593 594 if dry_run is None:
594 595 dry_run = opts.get('dry_run')
595 596 if similarity is None:
596 597 similarity = float(opts.get('similarity') or 0)
597 598 # we'd use status here, except handling of symlinks and ignore is tricky
598 599 added, unknown, deleted, removed = [], [], [], []
599 600 audit_path = pathauditor(repo.root)
600 601 m = match(repo[None], pats, opts)
601 602 for abs in repo.walk(m):
602 603 target = repo.wjoin(abs)
603 604 good = True
604 605 try:
605 606 audit_path(abs)
606 607 except (OSError, util.Abort):
607 608 good = False
608 609 rel = m.rel(abs)
609 610 exact = m.exact(abs)
610 611 if good and abs not in repo.dirstate:
611 612 unknown.append(abs)
612 613 if repo.ui.verbose or not exact:
613 614 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
614 615 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
615 616 or (os.path.isdir(target) and not os.path.islink(target))):
616 617 deleted.append(abs)
617 618 if repo.ui.verbose or not exact:
618 619 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
619 620 # for finding renames
620 621 elif repo.dirstate[abs] == 'r':
621 622 removed.append(abs)
622 623 elif repo.dirstate[abs] == 'a':
623 624 added.append(abs)
624 625 copies = {}
625 626 if similarity > 0:
626 627 for old, new, score in similar.findrenames(repo,
627 628 added + unknown, removed + deleted, similarity):
628 629 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
629 630 repo.ui.status(_('recording removal of %s as rename to %s '
630 631 '(%d%% similar)\n') %
631 632 (m.rel(old), m.rel(new), score * 100))
632 633 copies[new] = old
633 634
634 635 if not dry_run:
635 636 wctx = repo[None]
636 637 wlock = repo.wlock()
637 638 try:
638 639 wctx.forget(deleted)
639 640 wctx.add(unknown)
640 641 for new, old in copies.iteritems():
641 642 wctx.copy(old, new)
642 643 finally:
643 644 wlock.release()
644 645
645 646 def updatedir(ui, repo, patches, similarity=0):
646 647 '''Update dirstate after patch application according to metadata'''
647 648 if not patches:
648 649 return []
649 650 copies = []
650 651 removes = set()
651 652 cfiles = patches.keys()
652 653 cwd = repo.getcwd()
653 654 if cwd:
654 655 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
655 656 for f in patches:
656 657 gp = patches[f]
657 658 if not gp:
658 659 continue
659 660 if gp.op == 'RENAME':
660 661 copies.append((gp.oldpath, gp.path))
661 662 removes.add(gp.oldpath)
662 663 elif gp.op == 'COPY':
663 664 copies.append((gp.oldpath, gp.path))
664 665 elif gp.op == 'DELETE':
665 666 removes.add(gp.path)
666 667
667 668 wctx = repo[None]
668 669 for src, dst in copies:
669 670 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
670 671 if (not similarity) and removes:
671 672 wctx.remove(sorted(removes), True)
672 673
673 674 for f in patches:
674 675 gp = patches[f]
675 676 if gp and gp.mode:
676 677 islink, isexec = gp.mode
677 678 dst = repo.wjoin(gp.path)
678 679 # patch won't create empty files
679 680 if gp.op == 'ADD' and not os.path.lexists(dst):
680 681 flags = (isexec and 'x' or '') + (islink and 'l' or '')
681 682 repo.wwrite(gp.path, '', flags)
682 683 util.setflags(dst, islink, isexec)
683 684 addremove(repo, cfiles, similarity=similarity)
684 685 files = patches.keys()
685 686 files.extend([r for r in removes if r not in files])
686 687 return sorted(files)
687 688
688 689 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
689 690 """Update the dirstate to reflect the intent of copying src to dst. For
690 691 various reasons, it might not end with dst being marked as copied from src.
691 692 """
692 693 origsrc = repo.dirstate.copied(src) or src
693 694 if dst == origsrc: # copying back a copy?
694 695 if repo.dirstate[dst] not in 'mn' and not dryrun:
695 696 repo.dirstate.normallookup(dst)
696 697 else:
697 698 if repo.dirstate[origsrc] == 'a' and origsrc == src:
698 699 if not ui.quiet:
699 700 ui.warn(_("%s has not been committed yet, so no copy "
700 701 "data will be stored for %s.\n")
701 702 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
702 703 if repo.dirstate[dst] in '?r' and not dryrun:
703 704 wctx.add([dst])
704 705 elif not dryrun:
705 706 wctx.copy(origsrc, dst)
706 707
707 708 def readrequires(opener, supported):
708 709 '''Reads and parses .hg/requires and checks if all entries found
709 710 are in the list of supported features.'''
710 711 requirements = set(opener.read("requires").splitlines())
711 712 missings = []
712 713 for r in requirements:
713 714 if r not in supported:
714 715 if not r or not r[0].isalnum():
715 716 raise error.RequirementError(_(".hg/requires file is corrupt"))
716 717 missings.append(r)
717 718 missings.sort()
718 719 if missings:
719 720 raise error.RequirementError(_("unknown repository format: "
720 721 "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
721 722 return requirements
722 723
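Aside: readrequires() as defined above, fed by a toy opener serving a hand-written requires file; an entry outside the supported set would raise error.RequirementError instead:

# Illustration only.
class fakeopener(object):
    def read(self, name):
        return 'revlogv1\nstore\nfncache\n'

print(readrequires(fakeopener(), set(['revlogv1', 'store', 'fncache'])))
# -> set(['revlogv1', 'store', 'fncache'])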
723 724 class filecacheentry(object):
724 725 def __init__(self, path):
725 726 self.path = path
726 727 self.cachestat = filecacheentry.stat(self.path)
727 728
728 729 if self.cachestat:
729 730 self._cacheable = self.cachestat.cacheable()
730 731 else:
731 732 # None means we don't know yet
732 733 self._cacheable = None
733 734
734 735 def refresh(self):
735 736 if self.cacheable():
736 737 self.cachestat = filecacheentry.stat(self.path)
737 738
738 739 def cacheable(self):
739 740 if self._cacheable is not None:
740 741 return self._cacheable
741 742
742 743 # we don't know yet, assume it is for now
743 744 return True
744 745
745 746 def changed(self):
746 747 # no point in going further if we can't cache it
747 748 if not self.cacheable():
748 749 return True
749 750
750 751 newstat = filecacheentry.stat(self.path)
751 752
752 753 # we may not know if it's cacheable yet, check again now
753 754 if newstat and self._cacheable is None:
754 755 self._cacheable = newstat.cacheable()
755 756
756 757 # check again
757 758 if not self._cacheable:
758 759 return True
759 760
760 761 if self.cachestat != newstat:
761 762 self.cachestat = newstat
762 763 return True
763 764 else:
764 765 return False
765 766
766 767 @staticmethod
767 768 def stat(path):
768 769 try:
769 770 return util.cachestat(path)
770 771 except OSError, e:
771 772 if e.errno != errno.ENOENT:
772 773 raise
773 774
774 775 class filecache(object):
775 776 '''A property like decorator that tracks a file under .hg/ for updates.
776 777
777 778 Records stat info when called in _filecache.
778 779
779 780 On subsequent calls, compares old stat info with new info, and recreates
780 781 the object when needed, updating the new stat info in _filecache.
781 782
782 783 Mercurial either atomically renames or appends to files under .hg,
783 784 so to ensure the cache is reliable we need the filesystem to be able
784 785 to tell us if a file has been replaced. If it can't, we fall back to
785 786 recreating the object on every call (essentially the same behaviour as
786 787 propertycache).'''
787 788 def __init__(self, path, instore=False):
788 789 self.path = path
789 790 self.instore = instore
790 791
791 792 def __call__(self, func):
792 793 self.func = func
793 794 self.name = func.__name__
794 795 return self
795 796
796 797 def __get__(self, obj, type=None):
797 798 entry = obj._filecache.get(self.name)
798 799
799 800 if entry:
800 801 if entry.changed():
801 802 entry.obj = self.func(obj)
802 803 else:
803 804 path = self.instore and obj.sjoin(self.path) or obj.join(self.path)
804 805
805 806 # We stat -before- creating the object so our cache doesn't lie if
806 807 # a writer modified between the time we read and stat
807 808 entry = filecacheentry(path)
808 809 entry.obj = self.func(obj)
809 810
810 811 obj._filecache[self.name] = entry
811 812
812 813 setattr(obj, self.name, entry.obj)
813 814 return entry.obj
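Aside: the decorator above used on a minimal repo-like object; _filecache and join() are the two things the descriptor expects, and the 'bookmarks' file name is just an example:

# Illustration only (assumes <path>/bookmarks exists when first read).
import os

class fakerepo(object):
    def __init__(self, path):
        self._path = path
        self._filecache = {}
    def join(self, name):
        return os.path.join(self._path, name)
    @filecache('bookmarks')
    def bookmarks(self):
        return open(self.join('bookmarks')).read()

# r = fakerepo('/some/repo/.hg'); r.bookmarks reads the file on first
# access; the descriptor also records stat info in r._filecache so a
# later change to the file can be detected.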
@@ -1,1149 +1,1149 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, xml.dom.minidom, shutil, posixpath
9 9 import stat, subprocess, tarfile
10 10 from i18n import _
11 11 import config, scmutil, util, node, error, cmdutil, bookmarks
12 12 hg = None
13 13 propertycache = util.propertycache
14 14
15 15 nullstate = ('', '', 'empty')
16 16
17 17 def state(ctx, ui):
18 18 """return a state dict, mapping subrepo paths configured in .hgsub
19 19 to tuple: (source from .hgsub, revision from .hgsubstate, kind
20 20 (key in types dict))
21 21 """
22 22 p = config.config()
23 23 def read(f, sections=None, remap=None):
24 24 if f in ctx:
25 25 try:
26 26 data = ctx[f].data()
27 27 except IOError, err:
28 28 if err.errno != errno.ENOENT:
29 29 raise
30 30 # handle missing subrepo spec files as removed
31 31 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
32 32 return
33 33 p.parse(f, data, sections, remap, read)
34 34 else:
35 35 raise util.Abort(_("subrepo spec file %s not found") % f)
36 36
37 37 if '.hgsub' in ctx:
38 38 read('.hgsub')
39 39
40 40 for path, src in ui.configitems('subpaths'):
41 41 p.set('subpaths', path, src, ui.configsource('subpaths', path))
42 42
43 43 rev = {}
44 44 if '.hgsubstate' in ctx:
45 45 try:
46 46 for l in ctx['.hgsubstate'].data().splitlines():
47 47 revision, path = l.split(" ", 1)
48 48 rev[path] = revision
49 49 except IOError, err:
50 50 if err.errno != errno.ENOENT:
51 51 raise
52 52
53 53 def remap(src):
54 54 for pattern, repl in p.items('subpaths'):
55 55 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
56 56 # does a string decode.
57 57 repl = repl.encode('string-escape')
58 58 # However, we still want to allow back references to go
59 59 # through unharmed, so we turn r'\\1' into r'\1'. Again,
60 60 # extra escapes are needed because re.sub string decodes.
61 61 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
62 62 try:
63 63 src = re.sub(pattern, repl, src, 1)
64 64 except re.error, e:
65 65 raise util.Abort(_("bad subrepository pattern in %s: %s")
66 66 % (p.source('subpaths', pattern), e))
67 67 return src
68 68
69 69 state = {}
70 70 for path, src in p[''].items():
71 71 kind = 'hg'
72 72 if src.startswith('['):
73 73 if ']' not in src:
74 74 raise util.Abort(_('missing ] in subrepo source'))
75 75 kind, src = src.split(']', 1)
76 76 kind = kind[1:]
77 77 src = src.lstrip() # strip any extra whitespace after ']'
78 78
79 79 if not util.url(src).isabs():
80 80 parent = _abssource(ctx._repo, abort=False)
81 81 if parent:
82 82 parent = util.url(parent)
83 83 parent.path = posixpath.join(parent.path or '', src)
84 84 parent.path = posixpath.normpath(parent.path)
85 85 joined = str(parent)
86 86 # Remap the full joined path and use it if it changes,
87 87 # else remap the original source.
88 88 remapped = remap(joined)
89 89 if remapped == joined:
90 90 src = remap(src)
91 91 else:
92 92 src = remapped
93 93
94 94 src = remap(src)
95 state[path] = (src.strip(), rev.get(path, ''), kind)
95 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
96 96
97 97 return state
98 98
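Aside: an example of the two inputs state() combines; the paths, URLs, and hash below are illustrative:

# .hgsub maps path -> source, with an optional [kind] prefix:
#     nested     = https://example.com/nested
#     vendor/lib = [git]git://example.com/lib.git
# .hgsubstate pins each path to a revision:
#     0123456789abcdef0123456789abcdef01234567 nested
# giving, roughly:
#     {'nested':     ('https://example.com/nested', '0123...', 'hg'),
#      'vendor/lib': ('git://example.com/lib.git', '', 'git')}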
99 99 def writestate(repo, state):
100 100 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
101 101 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
102 102 repo.wwrite('.hgsubstate', ''.join(lines), '')
103 103
104 104 def submerge(repo, wctx, mctx, actx, overwrite):
105 105 """delegated from merge.applyupdates: merging of .hgsubstate file
106 106 in working context, merging context and ancestor context"""
107 107 if mctx == actx: # backwards?
108 108 actx = wctx.p1()
109 109 s1 = wctx.substate
110 110 s2 = mctx.substate
111 111 sa = actx.substate
112 112 sm = {}
113 113
114 114 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
115 115
116 116 def debug(s, msg, r=""):
117 117 if r:
118 118 r = "%s:%s:%s" % r
119 119 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
120 120
121 121 for s, l in s1.items():
122 122 a = sa.get(s, nullstate)
123 123 ld = l # local state with possible dirty flag for compares
124 124 if wctx.sub(s).dirty():
125 125 ld = (l[0], l[1] + "+")
126 126 if wctx == actx: # overwrite
127 127 a = ld
128 128
129 129 if s in s2:
130 130 r = s2[s]
131 131 if ld == r or r == a: # no change or local is newer
132 132 sm[s] = l
133 133 continue
134 134 elif ld == a: # other side changed
135 135 debug(s, "other changed, get", r)
136 136 wctx.sub(s).get(r, overwrite)
137 137 sm[s] = r
138 138 elif ld[0] != r[0]: # sources differ
139 139 if repo.ui.promptchoice(
140 140 _(' subrepository sources for %s differ\n'
141 141 'use (l)ocal source (%s) or (r)emote source (%s)?')
142 142 % (s, l[0], r[0]),
143 143 (_('&Local'), _('&Remote')), 0):
144 144 debug(s, "prompt changed, get", r)
145 145 wctx.sub(s).get(r, overwrite)
146 146 sm[s] = r
147 147 elif ld[1] == a[1]: # local side is unchanged
148 148 debug(s, "other side changed, get", r)
149 149 wctx.sub(s).get(r, overwrite)
150 150 sm[s] = r
151 151 else:
152 152 debug(s, "both sides changed, merge with", r)
153 153 wctx.sub(s).merge(r)
154 154 sm[s] = l
155 155 elif ld == a: # remote removed, local unchanged
156 156 debug(s, "remote removed, remove")
157 157 wctx.sub(s).remove()
158 158 elif a == nullstate: # not present in remote or ancestor
159 159 debug(s, "local added, keep")
160 160 sm[s] = l
161 161 continue
162 162 else:
163 163 if repo.ui.promptchoice(
164 164 _(' local changed subrepository %s which remote removed\n'
165 165 'use (c)hanged version or (d)elete?') % s,
166 166 (_('&Changed'), _('&Delete')), 0):
167 167 debug(s, "prompt remove")
168 168 wctx.sub(s).remove()
169 169
170 170 for s, r in sorted(s2.items()):
171 171 if s in s1:
172 172 continue
173 173 elif s not in sa:
174 174 debug(s, "remote added, get", r)
175 175 mctx.sub(s).get(r)
176 176 sm[s] = r
177 177 elif r != sa[s]:
178 178 if repo.ui.promptchoice(
179 179 _(' remote changed subrepository %s which local removed\n'
180 180 'use (c)hanged version or (d)elete?') % s,
181 181 (_('&Changed'), _('&Delete')), 0) == 0:
182 182 debug(s, "prompt recreate", r)
183 183 wctx.sub(s).get(r)
184 184 sm[s] = r
185 185
186 186 # record merged .hgsubstate
187 187 writestate(repo, sm)
188 188
189 189 def _updateprompt(ui, sub, dirty, local, remote):
190 190 if dirty:
191 191 msg = (_(' subrepository sources for %s differ\n'
192 192 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
193 193 % (subrelpath(sub), local, remote))
194 194 else:
195 195 msg = (_(' subrepository sources for %s differ (in checked out version)\n'
196 196 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
197 197 % (subrelpath(sub), local, remote))
198 198 return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
199 199
200 200 def reporelpath(repo):
201 201 """return path to this (sub)repo as seen from outermost repo"""
202 202 parent = repo
203 203 while util.safehasattr(parent, '_subparent'):
204 204 parent = parent._subparent
205 205 p = parent.root.rstrip(os.sep)
206 206 return repo.root[len(p) + 1:]
207 207
208 208 def subrelpath(sub):
209 209 """return path to this subrepo as seen from outermost repo"""
210 210 if util.safehasattr(sub, '_relpath'):
211 211 return sub._relpath
212 212 if not util.safehasattr(sub, '_repo'):
213 213 return sub._path
214 214 return reporelpath(sub._repo)
215 215
216 216 def _abssource(repo, push=False, abort=True):
217 217 """return pull/push path of repo - either based on parent repo .hgsub info
218 218 or on the top repo config. Abort or return None if no source found."""
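# Illustrative example: a parent pulled from http://host/repo with a
# subrepo source of '../other' joins and normalizes to http://host/other.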
219 219 if util.safehasattr(repo, '_subparent'):
220 220 source = util.url(repo._subsource)
221 221 if source.isabs():
222 222 return str(source)
223 223 source.path = posixpath.normpath(source.path)
224 224 parent = _abssource(repo._subparent, push, abort=False)
225 225 if parent:
226 226 parent = util.url(util.pconvert(parent))
227 227 parent.path = posixpath.join(parent.path or '', source.path)
228 228 parent.path = posixpath.normpath(parent.path)
229 229 return str(parent)
230 230 else: # recursion reached top repo
231 231 if util.safehasattr(repo, '_subtoppath'):
232 232 return repo._subtoppath
233 233 if push and repo.ui.config('paths', 'default-push'):
234 234 return repo.ui.config('paths', 'default-push')
235 235 if repo.ui.config('paths', 'default'):
236 236 return repo.ui.config('paths', 'default')
237 237 if abort:
238 238 raise util.Abort(_("default path for subrepository %s not found") %
239 239 reporelpath(repo))
240 240
241 241 def itersubrepos(ctx1, ctx2):
242 242 """find subrepos in ctx1 or ctx2"""
243 243 # Create a (subpath, ctx) mapping where we prefer subpaths from
244 244 # ctx1. The subpaths from ctx2 are important when the .hgsub file
245 245 # has been modified (in ctx2) but not yet committed (in ctx1).
246 246 subpaths = dict.fromkeys(ctx2.substate, ctx2)
247 247 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
248 248 for subpath, ctx in sorted(subpaths.iteritems()):
249 249 yield subpath, ctx.sub(subpath)
250 250
251 251 def subrepo(ctx, path):
252 252 """return instance of the right subrepo class for subrepo in path"""
253 253 # subrepo inherently violates our import layering rules
254 254 # because it wants to make repo objects from deep inside the stack
255 255 # so we manually delay the circular imports to not break
256 256 # scripts that don't use our demand-loading
257 257 global hg
258 258 import hg as h
259 259 hg = h
260 260
261 261 scmutil.pathauditor(ctx._repo.root)(path)
262 262 state = ctx.substate.get(path, nullstate)
263 263 if state[2] not in types:
264 264 raise util.Abort(_('unknown subrepo type %s') % state[2])
265 265 return types[state[2]](ctx, path, state[:2])
266 266
267 267 # subrepo classes need to implement the following abstract class:
268 268
269 269 class abstractsubrepo(object):
270 270
271 271 def dirty(self, ignoreupdate=False):
272 272 """returns true if the dirstate of the subrepo is dirty or does not
273 273 match current stored state. If ignoreupdate is true, only check
274 274 whether the subrepo has uncommitted changes in its dirstate.
275 275 """
276 276 raise NotImplementedError
277 277
278 278 def checknested(self, path):
279 279 """check if path is a subrepository within this repository"""
280 280 return False
281 281
282 282 def commit(self, text, user, date):
283 283 """commit the current changes to the subrepo with the given
284 284 log message. Use given user and date if possible. Return the
285 285 new state of the subrepo.
286 286 """
287 287 raise NotImplementedError
288 288
289 289 def remove(self):
290 290 """remove the subrepo
291 291
292 292 (should verify the dirstate is not dirty first)
293 293 """
294 294 raise NotImplementedError
295 295
296 296 def get(self, state, overwrite=False):
297 297 """run whatever commands are needed to put the subrepo into
298 298 this state
299 299 """
300 300 raise NotImplementedError
301 301
302 302 def merge(self, state):
303 303 """merge currently-saved state with the new state."""
304 304 raise NotImplementedError
305 305
306 306 def push(self, opts):
307 307 """perform whatever action is analogous to 'hg push'
308 308
309 309 This may be a no-op on some systems.
310 310 """
311 311 raise NotImplementedError
312 312
313 313 def add(self, ui, match, dryrun, prefix):
314 314 return []
315 315
316 316 def status(self, rev2, **opts):
317 317 return [], [], [], [], [], [], []
318 318
319 319 def diff(self, diffopts, node2, match, prefix, **opts):
320 320 pass
321 321
322 322 def outgoing(self, ui, dest, opts):
323 323 return 1
324 324
325 325 def incoming(self, ui, source, opts):
326 326 return 1
327 327
328 328 def files(self):
329 329 """return filename iterator"""
330 330 raise NotImplementedError
331 331
332 332 def filedata(self, name):
333 333 """return file data"""
334 334 raise NotImplementedError
335 335
336 336 def fileflags(self, name):
337 337 """return file flags"""
338 338 return ''
339 339
340 340 def archive(self, ui, archiver, prefix):
341 341 files = self.files()
342 342 total = len(files)
343 343 relpath = subrelpath(self)
344 344 ui.progress(_('archiving (%s)') % relpath, 0,
345 345 unit=_('files'), total=total)
346 346 for i, name in enumerate(files):
347 347 flags = self.fileflags(name)
348 348 mode = 'x' in flags and 0755 or 0644
349 349 symlink = 'l' in flags
350 350 archiver.addfile(os.path.join(prefix, self._path, name),
351 351 mode, symlink, self.filedata(name))
352 352 ui.progress(_('archiving (%s)') % relpath, i + 1,
353 353 unit=_('files'), total=total)
354 354 ui.progress(_('archiving (%s)') % relpath, None)
355 355
356 356 def walk(self, match):
357 357 '''
358 358 walk recursively through the directory tree, finding all files
359 359 matched by the match function
360 360 '''
361 361 pass
362 362
363 363 def forget(self, files):
364 364 pass
365 365
366 366 class hgsubrepo(abstractsubrepo):
367 367 def __init__(self, ctx, path, state):
368 368 self._path = path
369 369 self._state = state
370 370 r = ctx._repo
371 371 root = r.wjoin(path)
372 372 create = False
373 373 if not os.path.exists(os.path.join(root, '.hg')):
374 374 create = True
375 375 util.makedirs(root)
376 376 self._repo = hg.repository(r.ui, root, create=create)
377 377 self._initrepo(r, state[0], create)
378 378
379 379 def _initrepo(self, parentrepo, source, create):
380 380 self._repo._subparent = parentrepo
381 381 self._repo._subsource = source
382 382
383 383 if create:
384 384 fp = self._repo.opener("hgrc", "w", text=True)
385 385 fp.write('[paths]\n')
386 386
387 387 def addpathconfig(key, value):
388 388 if value:
389 389 fp.write('%s = %s\n' % (key, value))
390 390 self._repo.ui.setconfig('paths', key, value)
391 391
392 392 defpath = _abssource(self._repo, abort=False)
393 393 defpushpath = _abssource(self._repo, True, abort=False)
394 394 addpathconfig('default', defpath)
395 395 if defpath != defpushpath:
396 396 addpathconfig('default-push', defpushpath)
397 397 fp.close()
398 398
399 399 def add(self, ui, match, dryrun, prefix):
400 400 return cmdutil.add(ui, self._repo, match, dryrun, True,
401 401 os.path.join(prefix, self._path))
402 402
403 403 def status(self, rev2, **opts):
404 404 try:
405 405 rev1 = self._state[1]
406 406 ctx1 = self._repo[rev1]
407 407 ctx2 = self._repo[rev2]
408 408 return self._repo.status(ctx1, ctx2, **opts)
409 409 except error.RepoLookupError, inst:
410 410 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
411 411 % (inst, subrelpath(self)))
412 412 return [], [], [], [], [], [], []
413 413
414 414 def diff(self, diffopts, node2, match, prefix, **opts):
415 415 try:
416 416 node1 = node.bin(self._state[1])
417 417 # We currently expect node2 to come from substate and be
418 418 # in hex format
419 419 if node2 is not None:
420 420 node2 = node.bin(node2)
421 421 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
422 422 node1, node2, match,
423 423 prefix=os.path.join(prefix, self._path),
424 424 listsubrepos=True, **opts)
425 425 except error.RepoLookupError, inst:
426 426 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
427 427 % (inst, subrelpath(self)))
428 428
429 429 def archive(self, ui, archiver, prefix):
430 430 self._get(self._state + ('hg',))
431 431 abstractsubrepo.archive(self, ui, archiver, prefix)
432 432
433 433 rev = self._state[1]
434 434 ctx = self._repo[rev]
435 435 for subpath in ctx.substate:
436 436 s = subrepo(ctx, subpath)
437 437 s.archive(ui, archiver, os.path.join(prefix, self._path))
438 438
439 439 def dirty(self, ignoreupdate=False):
440 440 r = self._state[1]
441 441 if r == '' and not ignoreupdate: # no state recorded
442 442 return True
443 443 w = self._repo[None]
444 444 if r != w.p1().hex() and not ignoreupdate:
445 445 # different version checked out
446 446 return True
447 447 return w.dirty() # working directory changed
448 448
449 449 def checknested(self, path):
450 450 return self._repo._checknested(self._repo.wjoin(path))
451 451
452 452 def commit(self, text, user, date):
453 453 # don't bother committing in the subrepo if it's only been
454 454 # updated
455 455 if not self.dirty(True):
456 456 return self._repo['.'].hex()
457 457 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
458 458 n = self._repo.commit(text, user, date)
459 459 if not n:
460 460 return self._repo['.'].hex() # different version checked out
461 461 return node.hex(n)
462 462
463 463 def remove(self):
464 464 # we can't fully delete the repository as it may contain
465 465 # local-only history
466 466 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
467 467 hg.clean(self._repo, node.nullid, False)
468 468
469 469 def _get(self, state):
470 470 source, revision, kind = state
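# Fetch only when the revision is not already present locally. An
# empty local repo is thrown away and cloned from scratch (letting
# hg.clone apply its usual clone-time optimizations); otherwise pull.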
471 471 if revision not in self._repo:
472 472 self._repo._subsource = source
473 473 srcurl = _abssource(self._repo)
474 474 other = hg.peer(self._repo.ui, {}, srcurl)
475 475 if len(self._repo) == 0:
476 476 self._repo.ui.status(_('cloning subrepo %s from %s\n')
477 477 % (subrelpath(self), srcurl))
478 478 parentrepo = self._repo._subparent
479 479 shutil.rmtree(self._repo.path)
480 480 other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
481 481 self._repo.root, update=False)
482 482 self._initrepo(parentrepo, source, create=True)
483 483 else:
484 484 self._repo.ui.status(_('pulling subrepo %s from %s\n')
485 485 % (subrelpath(self), srcurl))
486 486 self._repo.pull(other)
487 487 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
488 488 srcurl)
489 489
490 490 def get(self, state, overwrite=False):
491 491 self._get(state)
492 492 source, revision, kind = state
493 493 self._repo.ui.debug("getting subrepo %s\n" % self._path)
494 494 hg.clean(self._repo, revision, False)
495 495
496 496 def merge(self, state):
497 497 self._get(state)
498 498 cur = self._repo['.']
499 499 dst = self._repo[state[1]]
500 500 anc = dst.ancestor(cur)
501 501
502 502 def mergefunc():
503 503 if anc == cur:
504 504 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
505 505 hg.update(self._repo, state[1])
506 506 elif anc == dst:
507 507 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
508 508 else:
509 509 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
510 510 hg.merge(self._repo, state[1], remind=False)
511 511
512 512 wctx = self._repo[None]
513 513 if self.dirty():
514 514 if anc != dst:
515 515 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
516 516 mergefunc()
517 517 else:
518 518 mergefunc()
519 519 else:
520 520 mergefunc()
521 521
522 522 def push(self, opts):
523 523 force = opts.get('force')
524 524 newbranch = opts.get('new_branch')
525 525 ssh = opts.get('ssh')
526 526
527 527 # push subrepos depth-first for coherent ordering
528 528 c = self._repo['']
529 529 subs = c.substate # only repos that are committed
530 530 for s in sorted(subs):
531 531 if not c.sub(s).push(opts):
532 532 return False
533 533
534 534 dsturl = _abssource(self._repo, True)
535 535 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
536 536 (subrelpath(self), dsturl))
537 537 other = hg.peer(self._repo.ui, {'ssh': ssh}, dsturl)
538 538 return self._repo.push(other, force, newbranch=newbranch)
539 539
540 540 def outgoing(self, ui, dest, opts):
541 541 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
542 542
543 543 def incoming(self, ui, source, opts):
544 544 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
545 545
546 546 def files(self):
547 547 rev = self._state[1]
548 548 ctx = self._repo[rev]
549 549 return ctx.manifest()
550 550
551 551 def filedata(self, name):
552 552 rev = self._state[1]
553 553 return self._repo[rev][name].data()
554 554
555 555 def fileflags(self, name):
556 556 rev = self._state[1]
557 557 ctx = self._repo[rev]
558 558 return ctx.flags(name)
559 559
560 560 def walk(self, match):
561 561 ctx = self._repo[None]
562 562 return ctx.walk(match)
563 563
564 564 def forget(self, files):
565 565 ctx = self._repo[None]
566 566 ctx.forget(files)
567 567
568 568 class svnsubrepo(abstractsubrepo):
569 569 def __init__(self, ctx, path, state):
570 570 self._path = path
571 571 self._state = state
572 572 self._ctx = ctx
573 573 self._ui = ctx._repo.ui
574 574 self._exe = util.findexe('svn')
575 575 if not self._exe:
576 576 raise util.Abort(_("'svn' executable not found for subrepo '%s'")
577 577 % self._path)
578 578
579 579 def _svncommand(self, commands, filename='', failok=False):
580 580 cmd = [self._exe]
581 581 extrakw = {}
582 582 if not self._ui.interactive():
583 583 # Making stdin be a pipe should prevent svn from behaving
584 584 # interactively even if we can't pass --non-interactive.
585 585 extrakw['stdin'] = subprocess.PIPE
586 586 # Starting in svn 1.5 --non-interactive is a global flag
587 587 # instead of being per-command, but we need to support 1.4 so
588 588 # we have to be intelligent about what commands take
589 589 # --non-interactive.
590 590 if commands[0] in ('update', 'checkout', 'commit'):
591 591 cmd.append('--non-interactive')
592 592 cmd.extend(commands)
593 593 if filename is not None:
594 594 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
595 595 cmd.append(path)
596 596 env = dict(os.environ)
597 597 # Avoid localized output, preserve current locale for everything else.
598 598 env['LC_MESSAGES'] = 'C'
599 599 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
600 600 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
601 601 universal_newlines=True, env=env, **extrakw)
602 602 stdout, stderr = p.communicate()
603 603 stderr = stderr.strip()
604 604 if not failok:
605 605 if p.returncode:
606 606 raise util.Abort(stderr or 'exited with code %d' % p.returncode)
607 607 if stderr:
608 608 self._ui.warn(stderr + '\n')
609 609 return stdout, stderr
610 610
611 611 @propertycache
612 612 def _svnversion(self):
613 613 output, err = self._svncommand(['--version'], filename=None)
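# 'svn --version' typically starts with a line such as
# 'svn, version 1.6.17 (r1128011)' (illustrative); grab major.minor.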
614 614 m = re.search(r'^svn,\s+version\s+(\d+)\.(\d+)', output)
615 615 if not m:
616 616 raise util.Abort(_('cannot retrieve svn tool version'))
617 617 return (int(m.group(1)), int(m.group(2)))
618 618
619 619 def _wcrevs(self):
620 620 # Get the working directory revision as well as the last
621 621 # commit revision so we can compare the subrepo state with
622 622 # both. We used to store the working directory one.
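# 'svn info --xml' reports the working copy revision on the <entry>
# element and the last-changed revision on its nested <commit> element.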
623 623 output, err = self._svncommand(['info', '--xml'])
624 624 doc = xml.dom.minidom.parseString(output)
625 625 entries = doc.getElementsByTagName('entry')
626 626 lastrev, rev = '0', '0'
627 627 if entries:
628 628 rev = str(entries[0].getAttribute('revision')) or '0'
629 629 commits = entries[0].getElementsByTagName('commit')
630 630 if commits:
631 631 lastrev = str(commits[0].getAttribute('revision')) or '0'
632 632 return (lastrev, rev)
633 633
634 634 def _wcrev(self):
635 635 return self._wcrevs()[0]
636 636
637 637 def _wcchanged(self):
638 638 """Return (changes, extchanges) where changes is True
639 639 if the working directory was changed, and extchanges is
640 640 True if any of these changes concern an external entry.
641 641 """
642 642 output, err = self._svncommand(['status', '--xml'])
643 643 externals, changes = [], []
644 644 doc = xml.dom.minidom.parseString(output)
645 645 for e in doc.getElementsByTagName('entry'):
646 646 s = e.getElementsByTagName('wc-status')
647 647 if not s:
648 648 continue
649 649 item = s[0].getAttribute('item')
650 650 props = s[0].getAttribute('props')
651 651 path = e.getAttribute('path')
652 652 if item == 'external':
653 653 externals.append(path)
654 654 if (item not in ('', 'normal', 'unversioned', 'external')
655 655 or props not in ('', 'none', 'normal')):
656 656 changes.append(path)
657 657 for path in changes:
658 658 for ext in externals:
659 659 if path == ext or path.startswith(ext + os.sep):
660 660 return True, True
661 661 return bool(changes), False
662 662
663 663 def dirty(self, ignoreupdate=False):
664 664 if not self._wcchanged()[0]:
665 665 if self._state[1] in self._wcrevs() or ignoreupdate:
666 666 return False
667 667 return True
668 668
669 669 def commit(self, text, user, date):
670 670 # user and date are out of our hands since svn is centralized
671 671 changed, extchanged = self._wcchanged()
672 672 if not changed:
673 673 return self._wcrev()
674 674 if extchanged:
675 675 # Do not try to commit externals
676 676 raise util.Abort(_('cannot commit svn externals'))
677 677 commitinfo, err = self._svncommand(['commit', '-m', text])
678 678 self._ui.status(commitinfo)
679 679 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
680 680 if not newrev:
681 681 raise util.Abort(commitinfo.splitlines()[-1])
682 682 newrev = newrev.groups()[0]
683 683 self._ui.status(self._svncommand(['update', '-r', newrev])[0])
684 684 return newrev
685 685
686 686 def remove(self):
687 687 if self.dirty():
688 688 self._ui.warn(_('not removing repo %s because '
689 689 'it has changes.\n') % self._path)
690 690 return
691 691 self._ui.note(_('removing subrepo %s\n') % self._path)
692 692
693 693 def onerror(function, path, excinfo):
694 694 if function is not os.remove:
695 695 raise
696 696 # read-only files cannot be unlinked under Windows
697 697 s = os.stat(path)
698 698 if (s.st_mode & stat.S_IWRITE) != 0:
699 699 raise
700 700 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
701 701 os.remove(path)
702 702
703 703 path = self._ctx._repo.wjoin(self._path)
704 704 shutil.rmtree(path, onerror=onerror)
705 705 try:
706 706 os.removedirs(os.path.dirname(path))
707 707 except OSError:
708 708 pass
709 709
710 710 def get(self, state, overwrite=False):
711 711 if overwrite:
712 712 self._svncommand(['revert', '--recursive'])
713 713 args = ['checkout']
714 714 if self._svnversion >= (1, 5):
715 715 args.append('--force')
716 716 # The revision must be specified at the end of the URL to properly
717 717 # update to a directory which has since been deleted and recreated.
718 718 args.append('%s@%s' % (state[0], state[1]))
719 719 status, err = self._svncommand(args, failok=True)
720 720 if not re.search('Checked out revision [0-9]+.', status):
721 721 if ('is already a working copy for a different URL' in err
722 722 and (self._wcchanged() == (False, False))):
723 723 # obstructed but clean working copy, so just blow it away.
724 724 self.remove()
725 725 self.get(state, overwrite=False)
726 726 return
727 727 raise util.Abort((status or err).splitlines()[-1])
728 728 self._ui.status(status)
729 729
730 730 def merge(self, state):
731 731 old = self._state[1]
732 732 new = state[1]
733 733 if new != self._wcrev():
734 734 dirty = old == self._wcrev() or self._wcchanged()[0]
735 735 if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
736 736 self.get(state, False)
737 737
738 738 def push(self, opts):
739 739 # push is a no-op for SVN
740 740 return True
741 741
742 742 def files(self):
743 743 output = self._svncommand(['list'])[0] # _svncommand returns (stdout, stderr)
744 744 # This works because svn forbids \n in filenames.
745 745 return output.splitlines()
746 746
747 747 def filedata(self, name):
748 748 return self._svncommand(['cat'], name)[0]
749 749
750 750
751 751 class gitsubrepo(abstractsubrepo):
752 752 def __init__(self, ctx, path, state):
753 753 # TODO add git version check.
754 754 self._state = state
755 755 self._ctx = ctx
756 756 self._path = path
757 757 self._relpath = os.path.join(reporelpath(ctx._repo), path)
758 758 self._abspath = ctx._repo.wjoin(path)
759 759 self._subparent = ctx._repo
760 760 self._ui = ctx._repo.ui
761 761
762 762 def _gitcommand(self, commands, env=None, stream=False):
763 763 return self._gitdir(commands, env=env, stream=stream)[0]
764 764
765 765 def _gitdir(self, commands, env=None, stream=False):
766 766 return self._gitnodir(commands, env=env, stream=stream,
767 767 cwd=self._abspath)
768 768
769 769 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
770 770 """Calls the git command
771 771
772 772 This method tries to call the git command. Versions prior to 1.6.0
773 773 are not supported and will very probably fail.
774 774 """
775 775 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
776 776 # unless ui.quiet is set, print git's stderr,
777 777 # which is mostly progress and useful info
778 778 errpipe = None
779 779 if self._ui.quiet:
780 780 errpipe = open(os.devnull, 'w')
781 781 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
782 782 close_fds=util.closefds,
783 783 stdout=subprocess.PIPE, stderr=errpipe)
784 784 if stream:
785 785 return p.stdout, None
786 786
787 787 retdata = p.stdout.read().strip()
788 788 # wait for the child to exit to avoid race condition.
789 789 p.wait()
790 790
791 791 if p.returncode != 0 and p.returncode != 1:
792 792 # there are certain error codes that are ok
793 793 command = commands[0]
794 794 if command in ('cat-file', 'symbolic-ref'):
795 795 return retdata, p.returncode
796 796 # for all others, abort
797 797 raise util.Abort('git %s error %d in %s' %
798 798 (command, p.returncode, self._relpath))
799 799
800 800 return retdata, p.returncode
801 801
802 802 def _gitmissing(self):
803 803 return not os.path.exists(os.path.join(self._abspath, '.git'))
804 804
805 805 def _gitstate(self):
806 806 return self._gitcommand(['rev-parse', 'HEAD'])
807 807
808 808 def _gitcurrentbranch(self):
809 809 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
810 810 if err:
811 811 current = None
812 812 return current
813 813
814 814 def _gitremote(self, remote):
815 815 out = self._gitcommand(['remote', 'show', '-n', remote])
816 816 line = out.split('\n')[1]
817 817 i = line.index('URL: ') + len('URL: ')
818 818 return line[i:]
819 819
820 820 def _githavelocally(self, revision):
821 821 out, code = self._gitdir(['cat-file', '-e', revision])
822 822 return code == 0
823 823
824 824 def _gitisancestor(self, r1, r2):
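# merge-base prints the best common ancestor of r1 and r2; if that
# ancestor is r1 itself, r1 is an ancestor of r2.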
825 825 base = self._gitcommand(['merge-base', r1, r2])
826 826 return base == r1
827 827
828 828 def _gitisbare(self):
829 829 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
830 830
831 831 def _gitupdatestat(self):
832 832 """This must be run before git diff-index.
833 833 diff-index only looks at changes to file stat;
834 834 this command looks at file contents and updates the stat."""
835 835 self._gitcommand(['update-index', '-q', '--refresh'])
836 836
837 837 def _gitbranchmap(self):
838 838 '''returns 2 things:
839 839 a map from git branch to revision
840 840 a map from revision to branches'''
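# for-each-ref emits one '<sha> <refname>' line per ref (illustrative:
# '<40-hex sha> refs/heads/master'); only local heads and remote
# branches are kept, and remote HEAD symrefs are skipped below.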
841 841 branch2rev = {}
842 842 rev2branch = {}
843 843
844 844 out = self._gitcommand(['for-each-ref', '--format',
845 845 '%(objectname) %(refname)'])
846 846 for line in out.split('\n'):
847 847 revision, ref = line.split(' ')
848 848 if (not ref.startswith('refs/heads/') and
849 849 not ref.startswith('refs/remotes/')):
850 850 continue
851 851 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
852 852 continue # ignore remote/HEAD redirects
853 853 branch2rev[ref] = revision
854 854 rev2branch.setdefault(revision, []).append(ref)
855 855 return branch2rev, rev2branch
856 856
857 857 def _gittracking(self, branches):
858 858 'return map of remote branch to local tracking branch'
859 859 # assumes no more than one local tracking branch for each remote
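# illustrative: a local refs/heads/foo with branch.foo.remote=origin
# and branch.foo.merge=refs/heads/bar yields
# tracking['refs/remotes/origin/bar'] = 'refs/heads/foo'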
860 860 tracking = {}
861 861 for b in branches:
862 862 if b.startswith('refs/remotes/'):
863 863 continue
864 864 bname = b.split('/', 2)[2]
865 865 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
866 866 if remote:
867 867 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
868 868 tracking['refs/remotes/%s/%s' %
869 869 (remote, ref.split('/', 2)[2])] = b
870 870 return tracking
871 871
872 872 def _abssource(self, source):
873 873 if '://' not in source:
874 874 # recognize the scp syntax as an absolute source
875 875 colon = source.find(':')
876 876 if colon != -1 and '/' not in source[:colon]:
877 877 return source
878 878 self._subsource = source
879 879 return _abssource(self)
880 880
881 881 def _fetch(self, source, revision):
882 882 if self._gitmissing():
883 883 source = self._abssource(source)
884 884 self._ui.status(_('cloning subrepo %s from %s\n') %
885 885 (self._relpath, source))
886 886 self._gitnodir(['clone', source, self._abspath])
887 887 if self._githavelocally(revision):
888 888 return
889 889 self._ui.status(_('pulling subrepo %s from %s\n') %
890 890 (self._relpath, self._gitremote('origin')))
891 891 # try only origin: the originally cloned repo
892 892 self._gitcommand(['fetch'])
893 893 if not self._githavelocally(revision):
894 894 raise util.Abort(_("revision %s does not exist in subrepo %s") %
895 895 (revision, self._relpath))
896 896
897 897 def dirty(self, ignoreupdate=False):
898 898 if self._gitmissing():
899 899 return self._state[1] != ''
900 900 if self._gitisbare():
901 901 return True
902 902 if not ignoreupdate and self._state[1] != self._gitstate():
903 903 # different version checked out
904 904 return True
905 905 # check for staged changes or modified files; ignore untracked files
906 906 self._gitupdatestat()
907 907 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
908 908 return code == 1
909 909
910 910 def get(self, state, overwrite=False):
911 911 source, revision, kind = state
912 912 if not revision:
913 913 self.remove()
914 914 return
915 915 self._fetch(source, revision)
916 916 # if the repo was set to be bare, unbare it
917 917 if self._gitisbare():
918 918 self._gitcommand(['config', 'core.bare', 'false'])
919 919 if self._gitstate() == revision:
920 920 self._gitcommand(['reset', '--hard', 'HEAD'])
921 921 return
922 922 elif self._gitstate() == revision:
923 923 if overwrite:
924 924 # first reset the index to unmark new files for commit, because
925 925 # reset --hard will otherwise throw away files added for commit,
926 926 # not just unmark them.
927 927 self._gitcommand(['reset', 'HEAD'])
928 928 self._gitcommand(['reset', '--hard', 'HEAD'])
929 929 return
930 930 branch2rev, rev2branch = self._gitbranchmap()
931 931
932 932 def checkout(args):
933 933 cmd = ['checkout']
934 934 if overwrite:
935 935 # first reset the index to unmark new files for commit, because
936 936 # the -f option will otherwise throw away files added for
937 937 # commit, not just unmark them.
938 938 self._gitcommand(['reset', 'HEAD'])
939 939 cmd.append('-f')
940 940 self._gitcommand(cmd + args)
941 941
942 942 def rawcheckout():
943 943 # no branch to checkout, check it out with no branch
944 944 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
945 945 self._relpath)
946 946 self._ui.warn(_('check out a git branch if you intend '
947 947 'to make changes\n'))
948 948 checkout(['-q', revision])
949 949
950 950 if revision not in rev2branch:
951 951 rawcheckout()
952 952 return
953 953 branches = rev2branch[revision]
954 954 firstlocalbranch = None
955 955 for b in branches:
956 956 if b == 'refs/heads/master':
957 957 # master trumps all other branches
958 958 checkout(['refs/heads/master'])
959 959 return
960 960 if not firstlocalbranch and not b.startswith('refs/remotes/'):
961 961 firstlocalbranch = b
962 962 if firstlocalbranch:
963 963 checkout([firstlocalbranch])
964 964 return
965 965
966 966 tracking = self._gittracking(branch2rev.keys())
967 967 # choose a remote branch already tracked if possible
968 968 remote = branches[0]
969 969 if remote not in tracking:
970 970 for b in branches:
971 971 if b in tracking:
972 972 remote = b
973 973 break
974 974
975 975 if remote not in tracking:
976 976 # create a new local tracking branch
977 977 local = remote.split('/', 2)[2]
978 978 checkout(['-b', local, remote])
979 979 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
980 980 # When updating to a tracked remote branch,
981 981 # if the local tracking branch is downstream of it,
982 982 # a normal `git pull` would have performed a "fast-forward merge"
983 983 # which is equivalent to updating the local branch to the remote.
984 984 # Since we are only looking at branching at update, we need to
985 985 # detect this situation and perform this action lazily.
986 986 if tracking[remote] != self._gitcurrentbranch():
987 987 checkout([tracking[remote]])
988 988 self._gitcommand(['merge', '--ff', remote])
989 989 else:
990 990 # a real merge would be required, just checkout the revision
991 991 rawcheckout()
992 992
993 993 def commit(self, text, user, date):
994 994 if self._gitmissing():
995 995 raise util.Abort(_("subrepo %s is missing") % self._relpath)
996 996 cmd = ['commit', '-a', '-m', text]
997 997 env = os.environ.copy()
998 998 if user:
999 999 cmd += ['--author', user]
1000 1000 if date:
1001 1001 # git's date parser silently ignores when seconds < 1e9
1002 1002 # convert to ISO8601
1003 1003 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1004 1004 '%Y-%m-%dT%H:%M:%S %1%2')
1005 1005 self._gitcommand(cmd, env=env)
1006 1006 # make sure commit works otherwise HEAD might not exist under certain
1007 1007 # circumstances
1008 1008 return self._gitstate()
1009 1009
1010 1010 def merge(self, state):
1011 1011 source, revision, kind = state
1012 1012 self._fetch(source, revision)
1013 1013 base = self._gitcommand(['merge-base', revision, self._state[1]])
1014 1014 self._gitupdatestat()
1015 1015 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1016 1016
1017 1017 def mergefunc():
1018 1018 if base == revision:
1019 1019 self.get(state) # fast forward merge
1020 1020 elif base != self._state[1]:
1021 1021 self._gitcommand(['merge', '--no-commit', revision])
1022 1022
1023 1023 if self.dirty():
1024 1024 if self._gitstate() != revision:
1025 1025 dirty = self._gitstate() == self._state[1] or code != 0
1026 1026 if _updateprompt(self._ui, self, dirty,
1027 1027 self._state[1][:7], revision[:7]):
1028 1028 mergefunc()
1029 1029 else:
1030 1030 mergefunc()
1031 1031
1032 1032 def push(self, opts):
1033 1033 force = opts.get('force')
1034 1034
1035 1035 if not self._state[1]:
1036 1036 return True
1037 1037 if self._gitmissing():
1038 1038 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1039 1039 # if a branch in origin contains the revision, nothing to do
1040 1040 branch2rev, rev2branch = self._gitbranchmap()
1041 1041 if self._state[1] in rev2branch:
1042 1042 for b in rev2branch[self._state[1]]:
1043 1043 if b.startswith('refs/remotes/origin/'):
1044 1044 return True
1045 1045 for b, revision in branch2rev.iteritems():
1046 1046 if b.startswith('refs/remotes/origin/'):
1047 1047 if self._gitisancestor(self._state[1], revision):
1048 1048 return True
1049 1049 # otherwise, try to push the currently checked out branch
1050 1050 cmd = ['push']
1051 1051 if force:
1052 1052 cmd.append('--force')
1053 1053
1054 1054 current = self._gitcurrentbranch()
1055 1055 if current:
1056 1056 # determine if the current branch is even useful
1057 1057 if not self._gitisancestor(self._state[1], current):
1058 1058 self._ui.warn(_('unrelated git branch checked out '
1059 1059 'in subrepo %s\n') % self._relpath)
1060 1060 return False
1061 1061 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1062 1062 (current.split('/', 2)[2], self._relpath))
1063 1063 self._gitcommand(cmd + ['origin', current])
1064 1064 return True
1065 1065 else:
1066 1066 self._ui.warn(_('no branch checked out in subrepo %s\n'
1067 1067 'cannot push revision %s\n') %
1068 1068 (self._relpath, self._state[1]))
1069 1069 return False
1070 1070
1071 1071 def remove(self):
1072 1072 if self._gitmissing():
1073 1073 return
1074 1074 if self.dirty():
1075 1075 self._ui.warn(_('not removing repo %s because '
1076 1076 'it has changes.\n') % self._relpath)
1077 1077 return
1078 1078 # we can't fully delete the repository as it may contain
1079 1079 # local-only history
1080 1080 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1081 1081 self._gitcommand(['config', 'core.bare', 'true'])
1082 1082 for f in os.listdir(self._abspath):
1083 1083 if f == '.git':
1084 1084 continue
1085 1085 path = os.path.join(self._abspath, f)
1086 1086 if os.path.isdir(path) and not os.path.islink(path):
1087 1087 shutil.rmtree(path)
1088 1088 else:
1089 1089 os.remove(path)
1090 1090
1091 1091 def archive(self, ui, archiver, prefix):
1092 1092 source, revision = self._state
1093 1093 if not revision:
1094 1094 return
1095 1095 self._fetch(source, revision)
1096 1096
1097 1097 # Parse git's native archive command.
1098 1098 # This should be much faster than manually traversing the trees
1099 1099 # and objects with many subprocess calls.
1100 1100 tarstream = self._gitcommand(['archive', revision], stream=True)
1101 1101 tar = tarfile.open(fileobj=tarstream, mode='r|')
1102 1102 relpath = subrelpath(self)
1103 1103 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1104 1104 for i, info in enumerate(tar):
1105 1105 if info.isdir():
1106 1106 continue
1107 1107 if info.issym():
1108 1108 data = info.linkname
1109 1109 else:
1110 1110 data = tar.extractfile(info).read()
1111 1111 archiver.addfile(os.path.join(prefix, self._path, info.name),
1112 1112 info.mode, info.issym(), data)
1113 1113 ui.progress(_('archiving (%s)') % relpath, i + 1,
1114 1114 unit=_('files'))
1115 1115 ui.progress(_('archiving (%s)') % relpath, None)
1116 1116
1117 1117
1118 1118 def status(self, rev2, **opts):
1119 1119 rev1 = self._state[1]
1120 1120 if self._gitmissing() or not rev1:
1121 1121 # if the repo is missing, return no results
1122 1122 return [], [], [], [], [], [], []
1123 1123 modified, added, removed = [], [], []
1124 1124 self._gitupdatestat()
1125 1125 if rev2:
1126 1126 command = ['diff-tree', rev1, rev2]
1127 1127 else:
1128 1128 command = ['diff-index', rev1]
1129 1129 out = self._gitcommand(command)
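# each output line looks roughly like
#   ':100644 100644 <oldsha> <newsha> M\tpath/to/file'
# so the status letter sits immediately before the tab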
1130 1130 for line in out.split('\n'):
1131 1131 tab = line.find('\t')
1132 1132 if tab == -1:
1133 1133 continue
1134 1134 status, f = line[tab - 1], line[tab + 1:]
1135 1135 if status == 'M':
1136 1136 modified.append(f)
1137 1137 elif status == 'A':
1138 1138 added.append(f)
1139 1139 elif status == 'D':
1140 1140 removed.append(f)
1141 1141
1142 1142 deleted = unknown = ignored = clean = []
1143 1143 return modified, added, removed, deleted, unknown, ignored, clean
1144 1144
1145 1145 types = {
1146 1146 'hg': hgsubrepo,
1147 1147 'svn': svnsubrepo,
1148 1148 'git': gitsubrepo,
1149 1149 }