subrepo: backout f02d7a562a21...
Erik Zielke
r13172:84cec589 default
@@ -1,1951 +1,1938 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
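# A hedged sketch of the prefix walk above, with a hypothetical
# substate; the real code consults ctx.substate and, on a partial
# match, recurses via ctx.sub(prefix).checknested():
#
#   substate = {'sub': ('http://example.com/sub', 'abc123', 'hg')}
#   parts = 'sub/x.txt'.split('/')    # util.splitpath in real code
#   while parts:
#       prefix = '/'.join(parts)      # os.sep.join above
#       if prefix in substate:        # matches 'sub' after one pop
#           break
#       parts.pop()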
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 warned = [0]
182 182 def validate(node):
183 183 try:
184 184 r = self.changelog.rev(node)
185 185 return node
186 186 except error.LookupError:
187 187 if not warned[0]:
188 188 warned[0] = True
189 189 self.ui.warn(_("warning: ignoring unknown"
190 190 " working parent %s!\n") % short(node))
191 191 return nullid
192 192
193 193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194 194
195 195 def __getitem__(self, changeid):
196 196 if changeid is None:
197 197 return context.workingctx(self)
198 198 return context.changectx(self, changeid)
199 199
200 200 def __contains__(self, changeid):
201 201 try:
202 202 return bool(self.lookup(changeid))
203 203 except error.RepoLookupError:
204 204 return False
205 205
206 206 def __nonzero__(self):
207 207 return True
208 208
209 209 def __len__(self):
210 210 return len(self.changelog)
211 211
212 212 def __iter__(self):
213 213 for i in xrange(len(self)):
214 214 yield i
215 215
216 216 def url(self):
217 217 return 'file:' + self.root
218 218
219 219 def hook(self, name, throw=False, **args):
220 220 return hook.hook(self.ui, self, name, throw, **args)
221 221
222 222 tag_disallowed = ':\r\n'
223 223
224 224 def _tag(self, names, node, message, local, user, date, extra={}):
225 225 if isinstance(names, str):
226 226 allchars = names
227 227 names = (names,)
228 228 else:
229 229 allchars = ''.join(names)
230 230 for c in self.tag_disallowed:
231 231 if c in allchars:
232 232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233 233
234 234 branches = self.branchmap()
235 235 for name in names:
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 237 local=local)
238 238 if name in branches:
239 239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 240 " branch name\n") % name)
241 241
242 242 def writetags(fp, names, munge, prevtags):
243 243 fp.seek(0, 2)
244 244 if prevtags and prevtags[-1] != '\n':
245 245 fp.write('\n')
246 246 for name in names:
247 247 m = munge and munge(name) or name
248 248 if self._tagtypes and name in self._tagtypes:
249 249 old = self._tags.get(name, nullid)
250 250 fp.write('%s %s\n' % (hex(old), m))
251 251 fp.write('%s %s\n' % (hex(node), m))
252 252 fp.close()
253 253
254 254 prevtags = ''
255 255 if local:
256 256 try:
257 257 fp = self.opener('localtags', 'r+')
258 258 except IOError:
259 259 fp = self.opener('localtags', 'a')
260 260 else:
261 261 prevtags = fp.read()
262 262
263 263 # local tags are stored in the current charset
264 264 writetags(fp, names, None, prevtags)
265 265 for name in names:
266 266 self.hook('tag', node=hex(node), tag=name, local=local)
267 267 return
268 268
269 269 try:
270 270 fp = self.wfile('.hgtags', 'rb+')
271 271 except IOError:
272 272 fp = self.wfile('.hgtags', 'ab')
273 273 else:
274 274 prevtags = fp.read()
275 275
276 276 # committed tags are stored in UTF-8
277 277 writetags(fp, names, encoding.fromlocal, prevtags)
278 278
279 279 if '.hgtags' not in self.dirstate:
280 280 self[None].add(['.hgtags'])
281 281
282 282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284 284
285 285 for name in names:
286 286 self.hook('tag', node=hex(node), tag=name, local=local)
287 287
288 288 return tagnode
289 289
290 290 def tag(self, names, node, message, local, user, date):
291 291 '''tag a revision with one or more symbolic names.
292 292
293 293 names is a list of strings or, when adding a single tag, names may be a
294 294 string.
295 295
296 296 if local is True, the tags are stored in a per-repository file.
297 297 otherwise, they are stored in the .hgtags file, and a new
298 298 changeset is committed with the change.
299 299
300 300 keyword arguments:
301 301
302 302 local: whether to store tags in non-version-controlled file
303 303 (default False)
304 304
305 305 message: commit message to use if committing
306 306
307 307 user: name of user to use if committing
308 308
309 309 date: date tuple to use if committing'''
310 310
311 311 if not local:
312 312 for x in self.status()[:5]:
313 313 if '.hgtags' in x:
314 314 raise util.Abort(_('working copy of .hgtags is changed '
315 315 '(please commit .hgtags manually)'))
316 316
317 317 self.tags() # instantiate the cache
318 318 self._tag(names, node, message, local, user, date)
319 319
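# Hedged usage sketch for tag(); the repo and node objects are
# hypothetical:
#
#   repo.tag('v1.0', node, 'Added tag v1.0', local=False,
#            user='me <me@example.com>', date=None)
#
# Per the docstring above, a single string tags one name; a list of
# strings tags several names in one call.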
320 320 def tags(self):
321 321 '''return a mapping of tag to node'''
322 322 if self._tags is None:
323 323 (self._tags, self._tagtypes) = self._findtags()
324 324
325 325 return self._tags
326 326
327 327 def _findtags(self):
328 328 '''Do the hard work of finding tags. Return a pair of dicts
329 329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 330 maps tag name to a string like \'global\' or \'local\'.
331 331 Subclasses or extensions are free to add their own tags, but
332 332 should be aware that the returned dicts will be retained for the
333 333 duration of the localrepo object.'''
334 334
335 335 # XXX what tagtype should subclasses/extensions use? Currently
336 336 # mq and bookmarks add tags, but do not set the tagtype at all.
337 337 # Should each extension invent its own tag type? Should there
338 338 # be one tagtype for all such "virtual" tags? Or is the status
339 339 # quo fine?
340 340
341 341 alltags = {} # map tag name to (node, hist)
342 342 tagtypes = {}
343 343
344 344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346 346
347 347 # Build the return dicts. Have to re-encode tag names because
348 348 # the tags module always uses UTF-8 (in order not to lose info
349 349 # writing to the cache), but the rest of Mercurial wants them in
350 350 # local encoding.
351 351 tags = {}
352 352 for (name, (node, hist)) in alltags.iteritems():
353 353 if node != nullid:
354 354 tags[encoding.tolocal(name)] = node
355 355 tags['tip'] = self.changelog.tip()
356 356 tagtypes = dict([(encoding.tolocal(name), value)
357 357 for (name, value) in tagtypes.iteritems()])
358 358 return (tags, tagtypes)
359 359
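# Illustrative shape of the pair returned above (nodes abbreviated,
# tag names hypothetical); note 'tip' is synthesized and gets no
# tagtypes entry:
#
#   tags     = {'tip': <node>, 'v1.0': <node>}
#   tagtypes = {'v1.0': 'global', 'wip': 'local'}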
360 360 def tagtype(self, tagname):
361 361 '''
362 362 return the type of the given tag. result can be:
363 363
364 364 'local' : a local tag
365 365 'global' : a global tag
366 366 None : tag does not exist
367 367 '''
368 368
369 369 self.tags()
370 370
371 371 return self._tagtypes.get(tagname)
372 372
373 373 def tagslist(self):
374 374 '''return a list of tags ordered by revision'''
375 375 l = []
376 376 for t, n in self.tags().iteritems():
377 377 try:
378 378 r = self.changelog.rev(n)
379 379 except:
380 380 r = -2 # sort to the beginning of the list if unknown
381 381 l.append((r, t, n))
382 382 return [(t, n) for r, t, n in sorted(l)]
383 383
384 384 def nodetags(self, node):
385 385 '''return the tags associated with a node'''
386 386 if not self.nodetagscache:
387 387 self.nodetagscache = {}
388 388 for t, n in self.tags().iteritems():
389 389 self.nodetagscache.setdefault(n, []).append(t)
390 390 for tags in self.nodetagscache.itervalues():
391 391 tags.sort()
392 392 return self.nodetagscache.get(node, [])
393 393
394 394 def _branchtags(self, partial, lrev):
395 395 # TODO: rename this function?
396 396 tiprev = len(self) - 1
397 397 if lrev != tiprev:
398 398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 399 self._updatebranchcache(partial, ctxgen)
400 400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401 401
402 402 return partial
403 403
404 404 def updatebranchcache(self):
405 405 tip = self.changelog.tip()
406 406 if self._branchcache is not None and self._branchcachetip == tip:
407 407 return self._branchcache
408 408
409 409 oldtip = self._branchcachetip
410 410 self._branchcachetip = tip
411 411 if oldtip is None or oldtip not in self.changelog.nodemap:
412 412 partial, last, lrev = self._readbranchcache()
413 413 else:
414 414 lrev = self.changelog.rev(oldtip)
415 415 partial = self._branchcache
416 416
417 417 self._branchtags(partial, lrev)
418 418 # this private cache holds all heads (not just tips)
419 419 self._branchcache = partial
420 420
421 421 def branchmap(self):
422 422 '''returns a dictionary {branch: [branchheads]}'''
423 423 self.updatebranchcache()
424 424 return self._branchcache
425 425
426 426 def branchtags(self):
427 427 '''return a dict where branch names map to the tipmost head of
428 428 the branch, open heads come before closed'''
429 429 bt = {}
430 430 for bn, heads in self.branchmap().iteritems():
431 431 tip = heads[-1]
432 432 for h in reversed(heads):
433 433 if 'close' not in self.changelog.read(h)[5]:
434 434 tip = h
435 435 break
436 436 bt[bn] = tip
437 437 return bt
438 438
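# branchtags() collapses each head list from branchmap() to a single
# tip, preferring the newest head not marked closed. Illustration
# (hypothetical nodes n1..n3, with n2 open):
#
#   branchmap()  -> {'default': [n1, n2], 'stable': [n3]}
#   branchtags() -> {'default': n2, 'stable': n3}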
439 439 def _readbranchcache(self):
440 440 partial = {}
441 441 try:
442 442 f = self.opener("branchheads.cache")
443 443 lines = f.read().split('\n')
444 444 f.close()
445 445 except (IOError, OSError):
446 446 return {}, nullid, nullrev
447 447
448 448 try:
449 449 last, lrev = lines.pop(0).split(" ", 1)
450 450 last, lrev = bin(last), int(lrev)
451 451 if lrev >= len(self) or self[lrev].node() != last:
452 452 # invalidate the cache
453 453 raise ValueError('invalidating branch cache (tip differs)')
454 454 for l in lines:
455 455 if not l:
456 456 continue
457 457 node, label = l.split(" ", 1)
458 458 label = encoding.tolocal(label.strip())
459 459 partial.setdefault(label, []).append(bin(node))
460 460 except KeyboardInterrupt:
461 461 raise
462 462 except Exception, inst:
463 463 if self.ui.debugflag:
464 464 self.ui.warn(str(inst), '\n')
465 465 partial, last, lrev = {}, nullid, nullrev
466 466 return partial, last, lrev
467 467
468 468 def _writebranchcache(self, branches, tip, tiprev):
469 469 try:
470 470 f = self.opener("branchheads.cache", "w", atomictemp=True)
471 471 f.write("%s %s\n" % (hex(tip), tiprev))
472 472 for label, nodes in branches.iteritems():
473 473 for node in nodes:
474 474 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 475 f.rename()
476 476 except (IOError, OSError):
477 477 pass
478 478
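# Layout of .hg/branchheads.cache as written above: a
# "<tip-hex> <tip-rev>" header line, then one "<node-hex> <branch>"
# line per branch head (hashes below are illustrative):
#
#   84cec589e272... 1938
#   84cec589e272... default
#   1e28bd5ccf6b... stable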
479 479 def _updatebranchcache(self, partial, ctxgen):
480 480 # collect new branch entries
481 481 newbranches = {}
482 482 for c in ctxgen:
483 483 newbranches.setdefault(c.branch(), []).append(c.node())
484 484 # if older branchheads are reachable from new ones, they aren't
485 485 # really branchheads. Note checking parents is insufficient:
486 486 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 487 for branch, newnodes in newbranches.iteritems():
488 488 bheads = partial.setdefault(branch, [])
489 489 bheads.extend(newnodes)
490 490 if len(bheads) <= 1:
491 491 continue
492 492 # starting from tip means fewer passes over reachable
493 493 while newnodes:
494 494 latest = newnodes.pop()
495 495 if latest not in bheads:
496 496 continue
497 497 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 498 reachable = self.changelog.reachable(latest, minbhrev)
499 499 reachable.remove(latest)
500 500 bheads = [b for b in bheads if b not in reachable]
501 501 partial[branch] = bheads
502 502
503 503 def lookup(self, key):
504 504 if isinstance(key, int):
505 505 return self.changelog.node(key)
506 506 elif key == '.':
507 507 return self.dirstate.parents()[0]
508 508 elif key == 'null':
509 509 return nullid
510 510 elif key == 'tip':
511 511 return self.changelog.tip()
512 512 n = self.changelog._match(key)
513 513 if n:
514 514 return n
515 515 if key in self.tags():
516 516 return self.tags()[key]
517 517 if key in self.branchtags():
518 518 return self.branchtags()[key]
519 519 n = self.changelog._partialmatch(key)
520 520 if n:
521 521 return n
522 522
523 523 # can't find key, check if it might have come from damaged dirstate
524 524 if key in self.dirstate.parents():
525 525 raise error.Abort(_("working directory has unknown parent '%s'!")
526 526 % short(key))
527 527 try:
528 528 if len(key) == 20:
529 529 key = hex(key)
530 530 except:
531 531 pass
532 532 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533 533
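# Resolution order implemented above: integer revision; the symbols
# '.', 'null' and 'tip'; an exact node match; tag names; branch
# names; finally an unambiguous node-hex prefix. For example:
#
#   repo.lookup('tip') == repo.changelog.tip()
#   repo.lookup(0)     == repo.changelog.node(0)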
534 534 def lookupbranch(self, key, remote=None):
535 535 repo = remote or self
536 536 if key in repo.branchmap():
537 537 return key
538 538
539 539 repo = (remote and remote.local()) and remote or self
540 540 return repo[key].branch()
541 541
542 542 def local(self):
543 543 return True
544 544
545 545 def join(self, f):
546 546 return os.path.join(self.path, f)
547 547
548 548 def wjoin(self, f):
549 549 return os.path.join(self.root, f)
550 550
551 551 def file(self, f):
552 552 if f[0] == '/':
553 553 f = f[1:]
554 554 return filelog.filelog(self.sopener, f)
555 555
556 556 def changectx(self, changeid):
557 557 return self[changeid]
558 558
559 559 def parents(self, changeid=None):
560 560 '''get list of changectxs for parents of changeid'''
561 561 return self[changeid].parents()
562 562
563 563 def filectx(self, path, changeid=None, fileid=None):
564 564 """changeid can be a changeset revision, node, or tag.
565 565 fileid can be a file revision or node."""
566 566 return context.filectx(self, path, changeid, fileid)
567 567
568 568 def getcwd(self):
569 569 return self.dirstate.getcwd()
570 570
571 571 def pathto(self, f, cwd=None):
572 572 return self.dirstate.pathto(f, cwd)
573 573
574 574 def wfile(self, f, mode='r'):
575 575 return self.wopener(f, mode)
576 576
577 577 def _link(self, f):
578 578 return os.path.islink(self.wjoin(f))
579 579
580 580 def _loadfilter(self, filter):
581 581 if filter not in self.filterpats:
582 582 l = []
583 583 for pat, cmd in self.ui.configitems(filter):
584 584 if cmd == '!':
585 585 continue
586 586 mf = matchmod.match(self.root, '', [pat])
587 587 fn = None
588 588 params = cmd
589 589 for name, filterfn in self._datafilters.iteritems():
590 590 if cmd.startswith(name):
591 591 fn = filterfn
592 592 params = cmd[len(name):].lstrip()
593 593 break
594 594 if not fn:
595 595 fn = lambda s, c, **kwargs: util.filter(s, c)
596 596 # Wrap old filters not supporting keyword arguments
597 597 if not inspect.getargspec(fn)[2]:
598 598 oldfn = fn
599 599 fn = lambda s, c, **kwargs: oldfn(s, c)
600 600 l.append((mf, fn, params))
601 601 self.filterpats[filter] = l
602 602 return self.filterpats[filter]
603 603
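# _loadfilter reads pattern/command pairs from the hgrc section of
# the same name ('encode' or 'decode'). A command starting with a
# name registered through adddatafilter() runs in-process; anything
# else falls through to util.filter(). A hedged hgrc example, with an
# illustrative pattern:
#
#   [encode]
#   **.txt = tempfile: unix2dos -n INFILE OUTFILE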
604 604 def _filter(self, filterpats, filename, data):
605 605 for mf, fn, cmd in filterpats:
606 606 if mf(filename):
607 607 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 608 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 609 break
610 610
611 611 return data
612 612
613 613 @propertycache
614 614 def _encodefilterpats(self):
615 615 return self._loadfilter('encode')
616 616
617 617 @propertycache
618 618 def _decodefilterpats(self):
619 619 return self._loadfilter('decode')
620 620
621 621 def adddatafilter(self, name, filter):
622 622 self._datafilters[name] = filter
623 623
624 624 def wread(self, filename):
625 625 if self._link(filename):
626 626 data = os.readlink(self.wjoin(filename))
627 627 else:
628 628 data = self.wopener(filename, 'r').read()
629 629 return self._filter(self._encodefilterpats, filename, data)
630 630
631 631 def wwrite(self, filename, data, flags):
632 632 data = self._filter(self._decodefilterpats, filename, data)
633 633 if 'l' in flags:
634 634 self.wopener.symlink(data, filename)
635 635 else:
636 636 self.wopener(filename, 'w').write(data)
637 637 if 'x' in flags:
638 638 util.set_flags(self.wjoin(filename), False, True)
639 639
640 640 def wwritedata(self, filename, data):
641 641 return self._filter(self._decodefilterpats, filename, data)
642 642
643 643 def transaction(self, desc):
644 644 tr = self._transref and self._transref() or None
645 645 if tr and tr.running():
646 646 return tr.nest()
647 647
648 648 # abort here if the journal already exists
649 649 if os.path.exists(self.sjoin("journal")):
650 650 raise error.RepoError(
651 651 _("abandoned transaction found - run hg recover"))
652 652
653 653 # save dirstate for rollback
654 654 try:
655 655 ds = self.opener("dirstate").read()
656 656 except IOError:
657 657 ds = ""
658 658 self.opener("journal.dirstate", "w").write(ds)
659 659 self.opener("journal.branch", "w").write(
660 660 encoding.fromlocal(self.dirstate.branch()))
661 661 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662 662
663 663 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 664 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 665 (self.join("journal.branch"), self.join("undo.branch")),
666 666 (self.join("journal.desc"), self.join("undo.desc"))]
667 667 tr = transaction.transaction(self.ui.warn, self.sopener,
668 668 self.sjoin("journal"),
669 669 aftertrans(renames),
670 670 self.store.createmode)
671 671 self._transref = weakref.ref(tr)
672 672 return tr
673 673
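# Files staged above and their post-transaction names; aftertrans()
# performs the journal -> undo renames once the transaction closes:
#
#   .hg/store/journal    -> .hg/store/undo
#   .hg/journal.dirstate -> .hg/undo.dirstate
#   .hg/journal.branch   -> .hg/undo.branch
#   .hg/journal.desc     -> .hg/undo.desc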
674 674 def recover(self):
675 675 lock = self.lock()
676 676 try:
677 677 if os.path.exists(self.sjoin("journal")):
678 678 self.ui.status(_("rolling back interrupted transaction\n"))
679 679 transaction.rollback(self.sopener, self.sjoin("journal"),
680 680 self.ui.warn)
681 681 self.invalidate()
682 682 return True
683 683 else:
684 684 self.ui.warn(_("no interrupted transaction available\n"))
685 685 return False
686 686 finally:
687 687 lock.release()
688 688
689 689 def rollback(self, dryrun=False):
690 690 wlock = lock = None
691 691 try:
692 692 wlock = self.wlock()
693 693 lock = self.lock()
694 694 if os.path.exists(self.sjoin("undo")):
695 695 try:
696 696 args = self.opener("undo.desc", "r").read().splitlines()
697 697 if len(args) >= 3 and self.ui.verbose:
698 698 desc = _("rolling back to revision %s"
699 699 " (undo %s: %s)\n") % (
700 700 int(args[0]) - 1, args[1], args[2])
701 701 elif len(args) >= 2:
702 702 desc = _("rolling back to revision %s (undo %s)\n") % (
703 703 int(args[0]) - 1, args[1])
704 704 except IOError:
705 705 desc = _("rolling back unknown transaction\n")
706 706 self.ui.status(desc)
707 707 if dryrun:
708 708 return
709 709 transaction.rollback(self.sopener, self.sjoin("undo"),
710 710 self.ui.warn)
711 711 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 712 try:
713 713 branch = self.opener("undo.branch").read()
714 714 self.dirstate.setbranch(branch)
715 715 except IOError:
716 716 self.ui.warn(_("Named branch could not be reset, "
717 717 "current branch still is: %s\n")
718 718 % self.dirstate.branch())
719 719 self.invalidate()
720 720 self.dirstate.invalidate()
721 721 self.destroyed()
722 722 else:
723 723 self.ui.warn(_("no rollback information available\n"))
724 724 return 1
725 725 finally:
726 726 release(lock, wlock)
727 727
728 728 def invalidatecaches(self):
729 729 self._tags = None
730 730 self._tagtypes = None
731 731 self.nodetagscache = None
732 732 self._branchcache = None # in UTF-8
733 733 self._branchcachetip = None
734 734
735 735 def invalidate(self):
736 736 for a in "changelog manifest".split():
737 737 if a in self.__dict__:
738 738 delattr(self, a)
739 739 self.invalidatecaches()
740 740
741 741 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 742 try:
743 743 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 744 except error.LockHeld, inst:
745 745 if not wait:
746 746 raise
747 747 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 748 (desc, inst.locker))
749 749 # default to 600 seconds timeout
750 750 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 751 releasefn, desc=desc)
752 752 if acquirefn:
753 753 acquirefn()
754 754 return l
755 755
756 756 def lock(self, wait=True):
757 757 '''Lock the repository store (.hg/store) and return a weak reference
758 758 to the lock. Use this before modifying the store (e.g. committing or
759 759 stripping). If you are opening a transaction, get a lock as well.'''
760 760 l = self._lockref and self._lockref()
761 761 if l is not None and l.held:
762 762 l.lock()
763 763 return l
764 764
765 765 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 766 _('repository %s') % self.origroot)
767 767 self._lockref = weakref.ref(l)
768 768 return l
769 769
770 770 def wlock(self, wait=True):
771 771 '''Lock the non-store parts of the repository (everything under
772 772 .hg except .hg/store) and return a weak reference to the lock.
773 773 Use this before modifying files in .hg.'''
774 774 l = self._wlockref and self._wlockref()
775 775 if l is not None and l.held:
776 776 l.lock()
777 777 return l
778 778
779 779 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 780 self.dirstate.invalidate, _('working directory of %s') %
781 781 self.origroot)
782 782 self._wlockref = weakref.ref(l)
783 783 return l
784 784
785 785 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 786 """
787 787 commit an individual file as part of a larger transaction
788 788 """
789 789
790 790 fname = fctx.path()
791 791 text = fctx.data()
792 792 flog = self.file(fname)
793 793 fparent1 = manifest1.get(fname, nullid)
794 794 fparent2 = fparent2o = manifest2.get(fname, nullid)
795 795
796 796 meta = {}
797 797 copy = fctx.renamed()
798 798 if copy and copy[0] != fname:
799 799 # Mark the new revision of this file as a copy of another
800 800 # file. This copy data will effectively act as a parent
801 801 # of this new revision. If this is a merge, the first
802 802 # parent will be the nullid (meaning "look up the copy data")
803 803 # and the second one will be the other parent. For example:
804 804 #
805 805 # 0 --- 1 --- 3 rev1 changes file foo
806 806 # \ / rev2 renames foo to bar and changes it
807 807 # \- 2 -/ rev3 should have bar with all changes and
808 808 # should record that bar descends from
809 809 # bar in rev2 and foo in rev1
810 810 #
811 811 # this allows this merge to succeed:
812 812 #
813 813 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 814 # \ / merging rev3 and rev4 should use bar@rev2
815 815 # \- 2 --- 4 as the merge base
816 816 #
817 817
818 818 cfname = copy[0]
819 819 crev = manifest1.get(cfname)
820 820 newfparent = fparent2
821 821
822 822 if manifest2: # branch merge
823 823 if fparent2 == nullid or crev is None: # copied on remote side
824 824 if cfname in manifest2:
825 825 crev = manifest2[cfname]
826 826 newfparent = fparent1
827 827
828 828 # find source in nearest ancestor if we've lost track
829 829 if not crev:
830 830 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 831 (fname, cfname))
832 832 for ancestor in self[None].ancestors():
833 833 if cfname in ancestor:
834 834 crev = ancestor[cfname].filenode()
835 835 break
836 836
837 837 if crev:
838 838 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 839 meta["copy"] = cfname
840 840 meta["copyrev"] = hex(crev)
841 841 fparent1, fparent2 = nullid, newfparent
842 842 else:
843 843 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 844 "copied from '%s'!\n") % (fname, cfname))
845 845
846 846 elif fparent2 != nullid:
847 847 # is one parent an ancestor of the other?
848 848 fparentancestor = flog.ancestor(fparent1, fparent2)
849 849 if fparentancestor == fparent1:
850 850 fparent1, fparent2 = fparent2, nullid
851 851 elif fparentancestor == fparent2:
852 852 fparent2 = nullid
853 853
854 854 # is the file changed?
855 855 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 856 changelist.append(fname)
857 857 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858 858
859 859 # are just the flags changed during merge?
860 860 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 861 changelist.append(fname)
862 862
863 863 return fparent1
864 864
865 865 def commit(self, text="", user=None, date=None, match=None, force=False,
866 866 editor=False, extra={}):
867 867 """Add a new revision to current repository.
868 868
869 869 Revision information is gathered from the working directory,
870 870 match can be used to filter the committed files. If editor is
871 871 supplied, it is called to get a commit message.
872 872 """
873 873
874 874 def fail(f, msg):
875 875 raise util.Abort('%s: %s' % (f, msg))
876 876
877 877 if not match:
878 878 match = matchmod.always(self.root, '')
879 879
880 880 if not force:
881 881 vdirs = []
882 882 match.dir = vdirs.append
883 883 match.bad = fail
884 884
885 885 wlock = self.wlock()
886 886 try:
887 887 wctx = self[None]
888 888 merge = len(wctx.parents()) > 1
889 889
890 890 if (not force and merge and match and
891 891 (match.files() or match.anypats())):
892 892 raise util.Abort(_('cannot partially commit a merge '
893 893 '(do not specify files or patterns)'))
894 894
895 895 changes = self.status(match=match, clean=force)
896 896 if force:
897 897 changes[0].extend(changes[6]) # mq may commit unchanged files
898 898
899 899 # check subrepos
900 900 subs = []
901 901 removedsubs = set()
902 902 for p in wctx.parents():
903 903 removedsubs.update(s for s in p.substate if match(s))
904 904 for s in wctx.substate:
905 905 removedsubs.discard(s)
906 906 if match(s) and wctx.sub(s).dirty():
907 907 subs.append(s)
908 908 if (subs or removedsubs):
909 909 if (not match('.hgsub') and
910 910 '.hgsub' in (wctx.modified() + wctx.added())):
911 911 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 912 if '.hgsubstate' not in changes[0]:
913 913 changes[0].insert(0, '.hgsubstate')
914 914
915 915 # make sure all explicit patterns are matched
916 916 if not force and match.files():
917 917 matched = set(changes[0] + changes[1] + changes[2])
918 918
919 919 for f in match.files():
920 920 if f == '.' or f in matched or f in wctx.substate:
921 921 continue
922 922 if f in changes[3]: # missing
923 923 fail(f, _('file not found!'))
924 924 if f in vdirs: # visited directory
925 925 d = f + '/'
926 926 for mf in matched:
927 927 if mf.startswith(d):
928 928 break
929 929 else:
930 930 fail(f, _("no match under directory!"))
931 931 elif f not in self.dirstate:
932 932 fail(f, _("file not tracked!"))
933 933
934 934 if (not force and not extra.get("close") and not merge
935 935 and not (changes[0] or changes[1] or changes[2])
936 936 and wctx.branch() == wctx.p1().branch()):
937 937 return None
938 938
939 939 ms = mergemod.mergestate(self)
940 940 for f in changes[0]:
941 941 if f in ms and ms[f] == 'u':
942 942 raise util.Abort(_("unresolved merge conflicts "
943 943 "(see hg resolve)"))
944 944
945 945 cctx = context.workingctx(self, text, user, date, extra, changes)
946 946 if editor:
947 947 cctx._text = editor(self, cctx, subs)
948 948 edited = (text != cctx._text)
949 949
950 950 # commit subs
951 951 if subs or removedsubs:
952 pstate = subrepo.substate(self['.'])
953 952 state = wctx.substate.copy()
954 953 for s in sorted(subs):
955 954 sub = wctx.sub(s)
956 955 self.ui.status(_('committing subrepository %s\n') %
957 956 subrepo.subrelpath(sub))
958 957 sr = sub.commit(cctx._text, user, date)
959 958 state[s] = (state[s][0], sr)
960
961 changed = False
962 if len(pstate) != len(state):
963 changed = True
964 if not changed:
965 for newstate in state:
966 if state[newstate][1] != pstate[newstate]:
967 changed = True
968 if changed:
969 subrepo.writestate(self, state)
970 elif (changes[0] == ['.hgsubstate'] and changes[1] == [] and
971 changes[2] == []):
972 return None
959 subrepo.writestate(self, state)
973 960
974 961 # Save commit message in case this transaction gets rolled back
975 962 # (e.g. by a pretxncommit hook). Leave the content alone on
976 963 # the assumption that the user will use the same editor again.
977 964 msgfile = self.opener('last-message.txt', 'wb')
978 965 msgfile.write(cctx._text)
979 966 msgfile.close()
980 967
981 968 p1, p2 = self.dirstate.parents()
982 969 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
983 970 try:
984 971 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
985 972 ret = self.commitctx(cctx, True)
986 973 except:
987 974 if edited:
988 975 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
989 976 self.ui.write(
990 977 _('note: commit message saved in %s\n') % msgfn)
991 978 raise
992 979
993 980 # update dirstate and mergestate
994 981 for f in changes[0] + changes[1]:
995 982 self.dirstate.normal(f)
996 983 for f in changes[2]:
997 984 self.dirstate.forget(f)
998 985 self.dirstate.setparents(ret)
999 986 ms.reset()
1000 987 finally:
1001 988 wlock.release()
1002 989
1003 990 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1004 991 return ret
1005 992
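# Hedged usage sketch for commit(); the repo object is hypothetical:
#
#   node = repo.commit(text='fix parser bug',
#                      user='me <me@example.com>')
#   m = matchmod.exact(repo.root, '', ['a.txt'])  # limit files
#   node = repo.commit(text='only a.txt', match=m)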
1006 993 def commitctx(self, ctx, error=False):
1007 994 """Add a new revision to current repository.
1008 995 Revision information is passed via the context argument.
1009 996 """
1010 997
1011 998 tr = lock = None
1012 999 removed = list(ctx.removed())
1013 1000 p1, p2 = ctx.p1(), ctx.p2()
1014 1001 m1 = p1.manifest().copy()
1015 1002 m2 = p2.manifest()
1016 1003 user = ctx.user()
1017 1004
1018 1005 lock = self.lock()
1019 1006 try:
1020 1007 tr = self.transaction("commit")
1021 1008 trp = weakref.proxy(tr)
1022 1009
1023 1010 # check in files
1024 1011 new = {}
1025 1012 changed = []
1026 1013 linkrev = len(self)
1027 1014 for f in sorted(ctx.modified() + ctx.added()):
1028 1015 self.ui.note(f + "\n")
1029 1016 try:
1030 1017 fctx = ctx[f]
1031 1018 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1032 1019 changed)
1033 1020 m1.set(f, fctx.flags())
1034 1021 except OSError, inst:
1035 1022 self.ui.warn(_("trouble committing %s!\n") % f)
1036 1023 raise
1037 1024 except IOError, inst:
1038 1025 errcode = getattr(inst, 'errno', errno.ENOENT)
1039 1026 if error or errcode and errcode != errno.ENOENT:
1040 1027 self.ui.warn(_("trouble committing %s!\n") % f)
1041 1028 raise
1042 1029 else:
1043 1030 removed.append(f)
1044 1031
1045 1032 # update manifest
1046 1033 m1.update(new)
1047 1034 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1048 1035 drop = [f for f in removed if f in m1]
1049 1036 for f in drop:
1050 1037 del m1[f]
1051 1038 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1052 1039 p2.manifestnode(), (new, drop))
1053 1040
1054 1041 # update changelog
1055 1042 self.changelog.delayupdate()
1056 1043 n = self.changelog.add(mn, changed + removed, ctx.description(),
1057 1044 trp, p1.node(), p2.node(),
1058 1045 user, ctx.date(), ctx.extra().copy())
1059 1046 p = lambda: self.changelog.writepending() and self.root or ""
1060 1047 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1061 1048 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1062 1049 parent2=xp2, pending=p)
1063 1050 self.changelog.finalize(trp)
1064 1051 tr.close()
1065 1052
1066 1053 if self._branchcache:
1067 1054 self.updatebranchcache()
1068 1055 return n
1069 1056 finally:
1070 1057 if tr:
1071 1058 tr.release()
1072 1059 lock.release()
1073 1060
1074 1061 def destroyed(self):
1075 1062 '''Inform the repository that nodes have been destroyed.
1076 1063 Intended for use by strip and rollback, so there's a common
1077 1064 place for anything that has to be done after destroying history.'''
1078 1065 # XXX it might be nice if we could take the list of destroyed
1079 1066 # nodes, but I don't see an easy way for rollback() to do that
1080 1067
1081 1068 # Ensure the persistent tag cache is updated. Doing it now
1082 1069 # means that the tag cache only has to worry about destroyed
1083 1070 # heads immediately after a strip/rollback. That in turn
1084 1071 # guarantees that "cachetip == currenttip" (comparing both rev
1085 1072 # and node) always means no nodes have been added or destroyed.
1086 1073
1087 1074 # XXX this is suboptimal when qrefresh'ing: we strip the current
1088 1075 # head, refresh the tag cache, then immediately add a new head.
1089 1076 # But I think doing it this way is necessary for the "instant
1090 1077 # tag cache retrieval" case to work.
1091 1078 self.invalidatecaches()
1092 1079
1093 1080 def walk(self, match, node=None):
1094 1081 '''
1095 1082 walk recursively through the directory tree or a given
1096 1083 changeset, finding all files matched by the match
1097 1084 function
1098 1085 '''
1099 1086 return self[node].walk(match)
1100 1087
1101 1088 def status(self, node1='.', node2=None, match=None,
1102 1089 ignored=False, clean=False, unknown=False,
1103 1090 listsubrepos=False):
1104 1091 """return status of files between two nodes or node and working directory
1105 1092
1106 1093 If node1 is None, use the first dirstate parent instead.
1107 1094 If node2 is None, compare node1 with working directory.
1108 1095 """
1109 1096
1110 1097 def mfmatches(ctx):
1111 1098 mf = ctx.manifest().copy()
1112 1099 for fn in mf.keys():
1113 1100 if not match(fn):
1114 1101 del mf[fn]
1115 1102 return mf
1116 1103
1117 1104 if isinstance(node1, context.changectx):
1118 1105 ctx1 = node1
1119 1106 else:
1120 1107 ctx1 = self[node1]
1121 1108 if isinstance(node2, context.changectx):
1122 1109 ctx2 = node2
1123 1110 else:
1124 1111 ctx2 = self[node2]
1125 1112
1126 1113 working = ctx2.rev() is None
1127 1114 parentworking = working and ctx1 == self['.']
1128 1115 match = match or matchmod.always(self.root, self.getcwd())
1129 1116 listignored, listclean, listunknown = ignored, clean, unknown
1130 1117
1131 1118 # load earliest manifest first for caching reasons
1132 1119 if not working and ctx2.rev() < ctx1.rev():
1133 1120 ctx2.manifest()
1134 1121
1135 1122 if not parentworking:
1136 1123 def bad(f, msg):
1137 1124 if f not in ctx1:
1138 1125 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1139 1126 match.bad = bad
1140 1127
1141 1128 if working: # we need to scan the working dir
1142 1129 subrepos = []
1143 1130 if '.hgsub' in self.dirstate:
1144 1131 subrepos = ctx1.substate.keys()
1145 1132 s = self.dirstate.status(match, subrepos, listignored,
1146 1133 listclean, listunknown)
1147 1134 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1148 1135
1149 1136 # check for any possibly clean files
1150 1137 if parentworking and cmp:
1151 1138 fixup = []
1152 1139 # do a full compare of any files that might have changed
1153 1140 for f in sorted(cmp):
1154 1141 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1155 1142 or ctx1[f].cmp(ctx2[f])):
1156 1143 modified.append(f)
1157 1144 else:
1158 1145 fixup.append(f)
1159 1146
1160 1147 # update dirstate for files that are actually clean
1161 1148 if fixup:
1162 1149 if listclean:
1163 1150 clean += fixup
1164 1151
1165 1152 try:
1166 1153 # updating the dirstate is optional
1167 1154 # so we don't wait on the lock
1168 1155 wlock = self.wlock(False)
1169 1156 try:
1170 1157 for f in fixup:
1171 1158 self.dirstate.normal(f)
1172 1159 finally:
1173 1160 wlock.release()
1174 1161 except error.LockError:
1175 1162 pass
1176 1163
1177 1164 if not parentworking:
1178 1165 mf1 = mfmatches(ctx1)
1179 1166 if working:
1180 1167 # we are comparing working dir against non-parent
1181 1168 # generate a pseudo-manifest for the working dir
1182 1169 mf2 = mfmatches(self['.'])
1183 1170 for f in cmp + modified + added:
1184 1171 mf2[f] = None
1185 1172 mf2.set(f, ctx2.flags(f))
1186 1173 for f in removed:
1187 1174 if f in mf2:
1188 1175 del mf2[f]
1189 1176 else:
1190 1177 # we are comparing two revisions
1191 1178 deleted, unknown, ignored = [], [], []
1192 1179 mf2 = mfmatches(ctx2)
1193 1180
1194 1181 modified, added, clean = [], [], []
1195 1182 for fn in mf2:
1196 1183 if fn in mf1:
1197 1184 if (mf1.flags(fn) != mf2.flags(fn) or
1198 1185 (mf1[fn] != mf2[fn] and
1199 1186 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1200 1187 modified.append(fn)
1201 1188 elif listclean:
1202 1189 clean.append(fn)
1203 1190 del mf1[fn]
1204 1191 else:
1205 1192 added.append(fn)
1206 1193 removed = mf1.keys()
1207 1194
1208 1195 r = modified, added, removed, deleted, unknown, ignored, clean
1209 1196
1210 1197 if listsubrepos:
1211 1198 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1212 1199 if working:
1213 1200 rev2 = None
1214 1201 else:
1215 1202 rev2 = ctx2.substate[subpath][1]
1216 1203 try:
1217 1204 submatch = matchmod.narrowmatcher(subpath, match)
1218 1205 s = sub.status(rev2, match=submatch, ignored=listignored,
1219 1206 clean=listclean, unknown=listunknown,
1220 1207 listsubrepos=True)
1221 1208 for rfiles, sfiles in zip(r, s):
1222 1209 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1223 1210 except error.LookupError:
1224 1211 self.ui.status(_("skipping missing subrepository: %s\n")
1225 1212 % subpath)
1226 1213
1227 1214 [l.sort() for l in r]
1228 1215 return r
1229 1216
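# The 7-tuple returned above unpacks as lists of file names:
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status()
#
# ignored, clean and unknown stay empty unless the matching keyword
# argument is passed as True.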
1230 1217 def heads(self, start=None):
1231 1218 heads = self.changelog.heads(start)
1232 1219 # sort the output in rev descending order
1233 1220 return sorted(heads, key=self.changelog.rev, reverse=True)
1234 1221
1235 1222 def branchheads(self, branch=None, start=None, closed=False):
1236 1223 '''return a (possibly filtered) list of heads for the given branch
1237 1224
1238 1225 Heads are returned in topological order, from newest to oldest.
1239 1226 If branch is None, use the dirstate branch.
1240 1227 If start is not None, return only heads reachable from start.
1241 1228 If closed is True, return heads that are marked as closed as well.
1242 1229 '''
1243 1230 if branch is None:
1244 1231 branch = self[None].branch()
1245 1232 branches = self.branchmap()
1246 1233 if branch not in branches:
1247 1234 return []
1248 1235 # the cache returns heads ordered lowest to highest
1249 1236 bheads = list(reversed(branches[branch]))
1250 1237 if start is not None:
1251 1238 # filter out the heads that cannot be reached from startrev
1252 1239 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1253 1240 bheads = [h for h in bheads if h in fbheads]
1254 1241 if not closed:
1255 1242 bheads = [h for h in bheads if
1256 1243 ('close' not in self.changelog.read(h)[5])]
1257 1244 return bheads
1258 1245
1259 1246 def branches(self, nodes):
1260 1247 if not nodes:
1261 1248 nodes = [self.changelog.tip()]
1262 1249 b = []
1263 1250 for n in nodes:
1264 1251 t = n
1265 1252 while 1:
1266 1253 p = self.changelog.parents(n)
1267 1254 if p[1] != nullid or p[0] == nullid:
1268 1255 b.append((t, n, p[0], p[1]))
1269 1256 break
1270 1257 n = p[0]
1271 1258 return b
1272 1259
1273 1260 def between(self, pairs):
1274 1261 r = []
1275 1262
1276 1263 for top, bottom in pairs:
1277 1264 n, l, i = top, [], 0
1278 1265 f = 1
1279 1266
1280 1267 while n != bottom and n != nullid:
1281 1268 p = self.changelog.parents(n)[0]
1282 1269 if i == f:
1283 1270 l.append(n)
1284 1271 f = f * 2
1285 1272 n = p
1286 1273 i += 1
1287 1274
1288 1275 r.append(l)
1289 1276
1290 1277 return r
1291 1278
1292 1279 def pull(self, remote, heads=None, force=False):
1293 1280 lock = self.lock()
1294 1281 try:
1295 1282 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1296 1283 force=force)
1297 1284 common, fetch, rheads = tmp
1298 1285 if not fetch:
1299 1286 self.ui.status(_("no changes found\n"))
1300 1287 return 0
1301 1288
1302 1289 if heads is None and fetch == [nullid]:
1303 1290 self.ui.status(_("requesting all changes\n"))
1304 1291 elif heads is None and remote.capable('changegroupsubset'):
1305 1292 # issue1320, avoid a race if remote changed after discovery
1306 1293 heads = rheads
1307 1294
1308 1295 if heads is None:
1309 1296 cg = remote.changegroup(fetch, 'pull')
1310 1297 else:
1311 1298 if not remote.capable('changegroupsubset'):
1312 1299 raise util.Abort(_("partial pull cannot be done because "
1313 1300 "other repository doesn't support "
1314 1301 "changegroupsubset."))
1315 1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1316 1303 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1317 1304 finally:
1318 1305 lock.release()
1319 1306
1320 1307 def push(self, remote, force=False, revs=None, newbranch=False):
1321 1308 '''Push outgoing changesets (limited by revs) from the current
1322 1309 repository to remote. Return an integer:
1323 1310 - 0 means HTTP error *or* nothing to push
1324 1311 - 1 means we pushed and remote head count is unchanged *or*
1325 1312 we have outgoing changesets but refused to push
1326 1313 - other values as described by addchangegroup()
1327 1314 '''
1328 1315 # there are two ways to push to remote repo:
1329 1316 #
1330 1317 # addchangegroup assumes local user can lock remote
1331 1318 # repo (local filesystem, old ssh servers).
1332 1319 #
1333 1320 # unbundle assumes local user cannot lock remote repo (new ssh
1334 1321 # servers, http servers).
1335 1322
1336 1323 lock = None
1337 1324 unbundle = remote.capable('unbundle')
1338 1325 if not unbundle:
1339 1326 lock = remote.lock()
1340 1327 try:
1341 1328 ret = discovery.prepush(self, remote, force, revs, newbranch)
1342 1329 if ret[0] is None:
1343 1330 # and here we return 0 for "nothing to push" or 1 for
1344 1331 # "something to push but I refuse"
1345 1332 return ret[1]
1346 1333
1347 1334 cg, remote_heads = ret
1348 1335 if unbundle:
1349 1336 # local repo finds heads on server, finds out what revs it must
1350 1337 # push. once revs transferred, if server finds it has
1351 1338 # different heads (someone else won commit/push race), server
1352 1339 # aborts.
1353 1340 if force:
1354 1341 remote_heads = ['force']
1355 1342 # ssh: return remote's addchangegroup()
1356 1343 # http: return remote's addchangegroup() or 0 for error
1357 1344 return remote.unbundle(cg, remote_heads, 'push')
1358 1345 else:
1359 1346 # we return an integer indicating remote head count change
1360 1347 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1361 1348 finally:
1362 1349 if lock is not None:
1363 1350 lock.release()
1364 1351
1365 1352 def changegroupinfo(self, nodes, source):
1366 1353 if self.ui.verbose or source == 'bundle':
1367 1354 self.ui.status(_("%d changesets found\n") % len(nodes))
1368 1355 if self.ui.debugflag:
1369 1356 self.ui.debug("list of changesets:\n")
1370 1357 for node in nodes:
1371 1358 self.ui.debug("%s\n" % hex(node))
1372 1359
1373 1360 def changegroupsubset(self, bases, heads, source, extranodes=None):
1374 1361 """Compute a changegroup consisting of all the nodes that are
1375 1362 descendants of any of the bases and ancestors of any of the heads.
1376 1363 Return a chunkbuffer object whose read() method will return
1377 1364 successive changegroup chunks.
1378 1365
1379 1366 It is fairly complex as determining which filenodes and which
1380 1367 manifest nodes need to be included for the changeset to be complete
1381 1368 is non-trivial.
1382 1369
1383 1370 Another wrinkle is doing the reverse, figuring out which changeset in
1384 1371 the changegroup a particular filenode or manifestnode belongs to.
1385 1372
1386 1373 The caller can specify some nodes that must be included in the
1387 1374 changegroup using the extranodes argument. It should be a dict
1388 1375 where the keys are the filenames (or 1 for the manifest), and the
1389 1376 values are lists of (node, linknode) tuples, where node is a wanted
1390 1377 node and linknode is the changelog node that should be transmitted as
1391 1378 the linkrev.
1392 1379 """
1393 1380
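# Illustrative extranodes shape per the docstring (node values
# hypothetical); the key 1 addresses the manifest:
#
#   extranodes = {
#       'foo.txt': [(fnode1, clnode1)],
#       1: [(mnode1, clnode1)],
#   }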
1394 1381 # Set up some initial variables
1395 1382 # Make it easy to refer to self.changelog
1396 1383 cl = self.changelog
1397 1384 # Compute the list of changesets in this changegroup.
1398 1385 # Some bases may turn out to be superfluous, and some heads may be
1399 1386 # too. nodesbetween will return the minimal set of bases and heads
1400 1387 # necessary to re-create the changegroup.
1401 1388 if not bases:
1402 1389 bases = [nullid]
1403 1390 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1404 1391
1405 1392 if extranodes is None:
1406 1393 # can we go through the fast path ?
1407 1394 heads.sort()
1408 1395 allheads = self.heads()
1409 1396 allheads.sort()
1410 1397 if heads == allheads:
1411 1398 return self._changegroup(msng_cl_lst, source)
1412 1399
1413 1400 # slow path
1414 1401 self.hook('preoutgoing', throw=True, source=source)
1415 1402
1416 1403 self.changegroupinfo(msng_cl_lst, source)
1417 1404
1418 1405 # We assume that all ancestors of bases are known
1419 1406 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1420 1407
1421 1408 # Make it easy to refer to self.manifest
1422 1409 mnfst = self.manifest
1423 1410 # We don't know which manifests are missing yet
1424 1411 msng_mnfst_set = {}
1425 1412 # Nor do we know which filenodes are missing.
1426 1413 msng_filenode_set = {}
1427 1414
1428 1415 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1429 1416 junk = None
1430 1417
1431 1418 # A changeset always belongs to itself, so the changenode lookup
1432 1419 # function for a changenode is identity.
1433 1420 def identity(x):
1434 1421 return x
1435 1422
1436 1423 # A function-generating function that sets up the initial environment
1437 1424 # for the inner function.
1438 1425 def filenode_collector(changedfiles):
1439 1426 # This gathers information from each manifestnode included in the
1440 1427 # changegroup about which filenodes the manifest node references
1441 1428 # so we can include those in the changegroup too.
1442 1429 #
1443 1430 # It also remembers which changenode each filenode belongs to. It
1444 1431 # does this by assuming a filenode belongs to the changenode
1445 1432 # that the first manifest referencing it belongs to.
1446 1433 def collect_msng_filenodes(mnfstnode):
1447 1434 r = mnfst.rev(mnfstnode)
1448 1435 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1449 1436 # If the delta parent is one of the parents,
1450 1437 # we only need to see a diff.
1451 1438 deltamf = mnfst.readdelta(mnfstnode)
1452 1439 # For each line in the delta
1453 1440 for f, fnode in deltamf.iteritems():
1454 1441 # And if the file is in the list of files we care
1455 1442 # about.
1456 1443 if f in changedfiles:
1457 1444 # Get the changenode this manifest belongs to
1458 1445 clnode = msng_mnfst_set[mnfstnode]
1459 1446 # Create the set of filenodes for the file if
1460 1447 # there isn't one already.
1461 1448 ndset = msng_filenode_set.setdefault(f, {})
1462 1449 # And set the filenode's changelog node to the
1463 1450 # manifest's if it hasn't been set already.
1464 1451 ndset.setdefault(fnode, clnode)
1465 1452 else:
1466 1453 # Otherwise we need a full manifest.
1467 1454 m = mnfst.read(mnfstnode)
1468 1455 # For every file we care about.
1469 1456 for f in changedfiles:
1470 1457 fnode = m.get(f, None)
1471 1458 # If it's in the manifest
1472 1459 if fnode is not None:
1473 1460 # See comments above.
1474 1461 clnode = msng_mnfst_set[mnfstnode]
1475 1462 ndset = msng_filenode_set.setdefault(f, {})
1476 1463 ndset.setdefault(fnode, clnode)
1477 1464 return collect_msng_filenodes
1478 1465
1479 1466 # If we determine that a particular file or manifest node must be a
1480 1467 # node that the recipient of the changegroup will already have, we can
1481 1468 # also assume the recipient will have all the parents. This function
1482 1469 # prunes them from the set of missing nodes.
1483 1470 def prune(revlog, missingnodes):
1484 1471 hasset = set()
1485 1472 # If a 'missing' filenode thinks it belongs to a changenode we
1486 1473 # assume the recipient must have, then the recipient must have
1487 1474 # that filenode.
1488 1475 for n in missingnodes:
1489 1476 clrev = revlog.linkrev(revlog.rev(n))
1490 1477 if clrev in commonrevs:
1491 1478 hasset.add(n)
1492 1479 for n in hasset:
1493 1480 missingnodes.pop(n, None)
1494 1481 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1495 1482 missingnodes.pop(revlog.node(r), None)
1496 1483
1497 1484 # Add the nodes that were explicitly requested.
1498 1485 def add_extra_nodes(name, nodes):
1499 1486 if not extranodes or name not in extranodes:
1500 1487 return
1501 1488
1502 1489 for node, linknode in extranodes[name]:
1503 1490 if node not in nodes:
1504 1491 nodes[node] = linknode
1505 1492
1506 1493 # Now that we have all these utility functions to help out and
1507 1494 # logically divide up the task, generate the group.
1508 1495 def gengroup():
1509 1496 # The set of changed files starts empty.
1510 1497 changedfiles = set()
1511 1498 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1512 1499
1513 1500 # Create a changenode group generator that will call our functions
1514 1501 # back to lookup the owning changenode and collect information.
1515 1502 group = cl.group(msng_cl_lst, identity, collect)
1516 1503 for cnt, chnk in enumerate(group):
1517 1504 yield chnk
1518 1505 # revlog.group yields three entries per node, so
1519 1506 # dividing by 3 gives an approximation of how many
1520 1507 # nodes have been processed.
1521 1508 self.ui.progress(_('bundling'), cnt / 3,
1522 1509 unit=_('changesets'))
1523 1510 changecount = cnt / 3
1524 1511 self.ui.progress(_('bundling'), None)
1525 1512
1526 1513 prune(mnfst, msng_mnfst_set)
1527 1514 add_extra_nodes(1, msng_mnfst_set)
1528 1515 msng_mnfst_lst = msng_mnfst_set.keys()
1529 1516 # Sort the manifestnodes by revision number.
1530 1517 msng_mnfst_lst.sort(key=mnfst.rev)
1531 1518 # Create a generator for the manifestnodes that calls our lookup
1532 1519 # and data collection functions back.
1533 1520 group = mnfst.group(msng_mnfst_lst,
1534 1521 lambda mnode: msng_mnfst_set[mnode],
1535 1522 filenode_collector(changedfiles))
1536 1523 efiles = {}
1537 1524 for cnt, chnk in enumerate(group):
1538 1525 if cnt % 3 == 1:
1539 1526 mnode = chnk[:20]
1540 1527 efiles.update(mnfst.readdelta(mnode))
1541 1528 yield chnk
1542 1529 # see above comment for why we divide by 3
1543 1530 self.ui.progress(_('bundling'), cnt / 3,
1544 1531 unit=_('manifests'), total=changecount)
1545 1532 self.ui.progress(_('bundling'), None)
1546 1533 efiles = len(efiles)
1547 1534
1548 1535 # These are no longer needed, dereference and toss the memory for
1549 1536 # them.
1550 1537 msng_mnfst_lst = None
1551 1538 msng_mnfst_set.clear()
1552 1539
1553 1540 if extranodes:
1554 1541 for fname in extranodes:
1555 1542 if isinstance(fname, int):
1556 1543 continue
1557 1544 msng_filenode_set.setdefault(fname, {})
1558 1545 changedfiles.add(fname)
1559 1546 # Go through all our files in order sorted by name.
1560 1547 for idx, fname in enumerate(sorted(changedfiles)):
1561 1548 filerevlog = self.file(fname)
1562 1549 if not len(filerevlog):
1563 1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1564 1551 # Toss out the filenodes that the recipient isn't really
1565 1552 # missing.
1566 1553 missingfnodes = msng_filenode_set.pop(fname, {})
1567 1554 prune(filerevlog, missingfnodes)
1568 1555 add_extra_nodes(fname, missingfnodes)
1569 1556 # If any filenodes are left, generate the group for them,
1570 1557 # otherwise don't bother.
1571 1558 if missingfnodes:
1572 1559 yield changegroup.chunkheader(len(fname))
1573 1560 yield fname
1574 1561 # Sort the filenodes by their revision # (topological order)
1575 1562 nodeiter = list(missingfnodes)
1576 1563 nodeiter.sort(key=filerevlog.rev)
1577 1564 # Create a group generator and only pass in a changenode
1578 1565 # lookup function as we need to collect no information
1579 1566 # from filenodes.
1580 1567 group = filerevlog.group(nodeiter,
1581 1568 lambda fnode: missingfnodes[fnode])
1582 1569 for chnk in group:
1583 1570 # even though we print the same progress on
1584 1571 # most loop iterations, put the progress call
1585 1572 # here so that time estimates (if any) can be updated
1586 1573 self.ui.progress(
1587 1574 _('bundling'), idx, item=fname,
1588 1575 unit=_('files'), total=efiles)
1589 1576 yield chnk
1590 1577 # Signal that no more groups are left.
1591 1578 yield changegroup.closechunk()
1592 1579 self.ui.progress(_('bundling'), None)
1593 1580
1594 1581 if msng_cl_lst:
1595 1582 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1596 1583
1597 1584 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1598 1585
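The gengroup generator above emits the bundle in a fixed stream order. A schematic sketch of that layout (chunk framing elided; assuming revlog.group's usual empty-chunk terminator):

    # changelog group chunks          -- ends with an empty chunk
    # manifest group chunks           -- ends with an empty chunk
    # for each changed file, sorted by name:
    #     chunkheader(len(fname)), fname
    #     filelog group chunks        -- ends with an empty chunk
    # closechunk()                    -- empty chunk: end of the file list
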
1599 1586 def changegroup(self, basenodes, source):
1600 1587 # to avoid a race we use changegroupsubset() (issue1320)
1601 1588 return self.changegroupsubset(basenodes, self.heads(), source)
1602 1589
1603 1590 def _changegroup(self, nodes, source):
1604 1591 """Compute the changegroup of all nodes that we have that a recipient
1605 1592 doesn't. Return a chunkbuffer object whose read() method will return
1606 1593 successive changegroup chunks.
1607 1594
1608 1595 This is much easier than the previous function as we can assume that
1609 1596 the recipient has any changenode we aren't sending them.
1610 1597
1611 1598 nodes is the set of nodes to send"""
1612 1599
1613 1600 self.hook('preoutgoing', throw=True, source=source)
1614 1601
1615 1602 cl = self.changelog
1616 1603 revset = set([cl.rev(n) for n in nodes])
1617 1604 self.changegroupinfo(nodes, source)
1618 1605
1619 1606 def identity(x):
1620 1607 return x
1621 1608
1622 1609 def gennodelst(log):
1623 1610 for r in log:
1624 1611 if log.linkrev(r) in revset:
1625 1612 yield log.node(r)
1626 1613
1627 1614 def lookuplinkrev_func(revlog):
1628 1615 def lookuplinkrev(n):
1629 1616 return cl.node(revlog.linkrev(revlog.rev(n)))
1630 1617 return lookuplinkrev
1631 1618
1632 1619 def gengroup():
1633 1620 '''yield a sequence of changegroup chunks (strings)'''
1634 1621 # construct a list of all changed files
1635 1622 changedfiles = set()
1636 1623 mmfs = {}
1637 1624 collect = changegroup.collector(cl, mmfs, changedfiles)
1638 1625
1639 1626 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1640 1627 # revlog.group yields three entries per node, so
1641 1628 # dividing by 3 gives an approximation of how many
1642 1629 # nodes have been processed.
1643 1630 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1644 1631 yield chnk
1645 1632 changecount = cnt / 3
1646 1633 self.ui.progress(_('bundling'), None)
1647 1634
1648 1635 mnfst = self.manifest
1649 1636 nodeiter = gennodelst(mnfst)
1650 1637 efiles = {}
1651 1638 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1652 1639 lookuplinkrev_func(mnfst))):
1653 1640 if cnt % 3 == 1:
1654 1641 mnode = chnk[:20]
1655 1642 efiles.update(mnfst.readdelta(mnode))
1656 1643 # see above comment for why we divide by 3
1657 1644 self.ui.progress(_('bundling'), cnt / 3,
1658 1645 unit=_('manifests'), total=changecount)
1659 1646 yield chnk
1660 1647 efiles = len(efiles)
1661 1648 self.ui.progress(_('bundling'), None)
1662 1649
1663 1650 for idx, fname in enumerate(sorted(changedfiles)):
1664 1651 filerevlog = self.file(fname)
1665 1652 if not len(filerevlog):
1666 1653 raise util.Abort(_("empty or missing revlog for %s") % fname)
1667 1654 nodeiter = gennodelst(filerevlog)
1668 1655 nodeiter = list(nodeiter)
1669 1656 if nodeiter:
1670 1657 yield changegroup.chunkheader(len(fname))
1671 1658 yield fname
1672 1659 lookup = lookuplinkrev_func(filerevlog)
1673 1660 for chnk in filerevlog.group(nodeiter, lookup):
1674 1661 self.ui.progress(
1675 1662 _('bundling'), idx, item=fname,
1676 1663 total=efiles, unit=_('files'))
1677 1664 yield chnk
1678 1665 self.ui.progress(_('bundling'), None)
1679 1666
1680 1667 yield changegroup.closechunk()
1681 1668
1682 1669 if nodes:
1683 1670 self.hook('outgoing', node=hex(nodes[0]), source=source)
1684 1671
1685 1672 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1686 1673
1687 1674 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1688 1675 """Add the changegroup returned by source.read() to this repo.
1689 1676 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1690 1677 the URL of the repo where this changegroup is coming from.
1691 1678
1692 1679 Return an integer summarizing the change to this repo:
1693 1680 - nothing changed or no source: 0
1694 1681 - more heads than before: 1+added heads (2..n)
1695 1682 - fewer heads than before: -1-removed heads (-2..-n)
1696 1683 - number of heads stays the same: 1
1697 1684 """
1698 1685 def csmap(x):
1699 1686 self.ui.debug("add changeset %s\n" % short(x))
1700 1687 return len(cl)
1701 1688
1702 1689 def revmap(x):
1703 1690 return cl.rev(x)
1704 1691
1705 1692 if not source:
1706 1693 return 0
1707 1694
1708 1695 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1709 1696
1710 1697 changesets = files = revisions = 0
1711 1698 efiles = set()
1712 1699
1713 1700 # write changelog data to temp files so concurrent readers will not see
1714 1701 # inconsistent view
1715 1702 cl = self.changelog
1716 1703 cl.delayupdate()
1717 1704 oldheads = len(cl.heads())
1718 1705
1719 1706 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1720 1707 try:
1721 1708 trp = weakref.proxy(tr)
1722 1709 # pull off the changeset group
1723 1710 self.ui.status(_("adding changesets\n"))
1724 1711 clstart = len(cl)
1725 1712 class prog(object):
1726 1713 step = _('changesets')
1727 1714 count = 1
1728 1715 ui = self.ui
1729 1716 total = None
1730 1717 def __call__(self):
1731 1718 self.ui.progress(self.step, self.count, unit=_('chunks'),
1732 1719 total=self.total)
1733 1720 self.count += 1
1734 1721 pr = prog()
1735 1722 source.callback = pr
1736 1723
1737 1724 if (cl.addgroup(source, csmap, trp) is None
1738 1725 and not emptyok):
1739 1726 raise util.Abort(_("received changelog group is empty"))
1740 1727 clend = len(cl)
1741 1728 changesets = clend - clstart
1742 1729 for c in xrange(clstart, clend):
1743 1730 efiles.update(self[c].files())
1744 1731 efiles = len(efiles)
1745 1732 self.ui.progress(_('changesets'), None)
1746 1733
1747 1734 # pull off the manifest group
1748 1735 self.ui.status(_("adding manifests\n"))
1749 1736 pr.step = _('manifests')
1750 1737 pr.count = 1
1751 1738 pr.total = changesets # manifests <= changesets
1752 1739 # no need to check for empty manifest group here:
1753 1740 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1754 1741 # no new manifest will be created and the manifest group will
1755 1742 # be empty during the pull
1756 1743 self.manifest.addgroup(source, revmap, trp)
1757 1744 self.ui.progress(_('manifests'), None)
1758 1745
1759 1746 needfiles = {}
1760 1747 if self.ui.configbool('server', 'validate', default=False):
1761 1748 # validate incoming csets have their manifests
1762 1749 for cset in xrange(clstart, clend):
1763 1750 mfest = self.changelog.read(self.changelog.node(cset))[0]
1764 1751 mfest = self.manifest.readdelta(mfest)
1765 1752 # store file nodes we must see
1766 1753 for f, n in mfest.iteritems():
1767 1754 needfiles.setdefault(f, set()).add(n)
1768 1755
1769 1756 # process the files
1770 1757 self.ui.status(_("adding file changes\n"))
1771 1758 pr.step = _('files')
1772 1759 pr.count = 1
1773 1760 pr.total = efiles
1774 1761 source.callback = None
1775 1762
1776 1763 while 1:
1777 1764 f = source.chunk()
1778 1765 if not f:
1779 1766 break
1780 1767 self.ui.debug("adding %s revisions\n" % f)
1781 1768 pr()
1782 1769 fl = self.file(f)
1783 1770 o = len(fl)
1784 1771 if fl.addgroup(source, revmap, trp) is None:
1785 1772 raise util.Abort(_("received file revlog group is empty"))
1786 1773 revisions += len(fl) - o
1787 1774 files += 1
1788 1775 if f in needfiles:
1789 1776 needs = needfiles[f]
1790 1777 for new in xrange(o, len(fl)):
1791 1778 n = fl.node(new)
1792 1779 if n in needs:
1793 1780 needs.remove(n)
1794 1781 if not needs:
1795 1782 del needfiles[f]
1796 1783 self.ui.progress(_('files'), None)
1797 1784
1798 1785 for f, needs in needfiles.iteritems():
1799 1786 fl = self.file(f)
1800 1787 for n in needs:
1801 1788 try:
1802 1789 fl.rev(n)
1803 1790 except error.LookupError:
1804 1791 raise util.Abort(
1805 1792 _('missing file data for %s:%s - run hg verify') %
1806 1793 (f, hex(n)))
1807 1794
1808 1795 newheads = len(cl.heads())
1809 1796 heads = ""
1810 1797 if oldheads and newheads != oldheads:
1811 1798 heads = _(" (%+d heads)") % (newheads - oldheads)
1812 1799
1813 1800 self.ui.status(_("added %d changesets"
1814 1801 " with %d changes to %d files%s\n")
1815 1802 % (changesets, revisions, files, heads))
1816 1803
1817 1804 if changesets > 0:
1818 1805 p = lambda: cl.writepending() and self.root or ""
1819 1806 self.hook('pretxnchangegroup', throw=True,
1820 1807 node=hex(cl.node(clstart)), source=srctype,
1821 1808 url=url, pending=p)
1822 1809
1823 1810 # make changelog see real files again
1824 1811 cl.finalize(trp)
1825 1812
1826 1813 tr.close()
1827 1814 finally:
1828 1815 tr.release()
1829 1816 if lock:
1830 1817 lock.release()
1831 1818
1832 1819 if changesets > 0:
1833 1820 # forcefully update the on-disk branch cache
1834 1821 self.ui.debug("updating the branch cache\n")
1835 1822 self.updatebranchcache()
1836 1823 self.hook("changegroup", node=hex(cl.node(clstart)),
1837 1824 source=srctype, url=url)
1838 1825
1839 1826 for i in xrange(clstart, clend):
1840 1827 self.hook("incoming", node=hex(cl.node(i)),
1841 1828 source=srctype, url=url)
1842 1829
1843 1830 # never return 0 here:
1844 1831 if newheads < oldheads:
1845 1832 return newheads - oldheads - 1
1846 1833 else:
1847 1834 return newheads - oldheads + 1
1848 1835
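The head-count return convention above can be captured in a standalone sketch; headsummary is a hypothetical helper following the docstring's contract, not part of localrepo:

    def headsummary(changesets, oldheads, newheads):
        # nothing changed or no source: 0
        if changesets == 0:
            return 0
        # never return 0 once changesets were added
        if newheads < oldheads:
            return newheads - oldheads - 1  # e.g. 3 heads -> 1 head: -3
        return newheads - oldheads + 1      # e.g. 1 head -> 3 heads: 3
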
1849 1836
1850 1837 def stream_in(self, remote, requirements):
1851 1838 fp = remote.stream_out()
1852 1839 l = fp.readline()
1853 1840 try:
1854 1841 resp = int(l)
1855 1842 except ValueError:
1856 1843 raise error.ResponseError(
1857 1844 _('Unexpected response from remote server:'), l)
1858 1845 if resp == 1:
1859 1846 raise util.Abort(_('operation forbidden by server'))
1860 1847 elif resp == 2:
1861 1848 raise util.Abort(_('locking the remote repository failed'))
1862 1849 elif resp != 0:
1863 1850 raise util.Abort(_('the server sent an unknown error code'))
1864 1851 self.ui.status(_('streaming all changes\n'))
1865 1852 l = fp.readline()
1866 1853 try:
1867 1854 total_files, total_bytes = map(int, l.split(' ', 1))
1868 1855 except (ValueError, TypeError):
1869 1856 raise error.ResponseError(
1870 1857 _('Unexpected response from remote server:'), l)
1871 1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1872 1859 (total_files, util.bytecount(total_bytes)))
1873 1860 start = time.time()
1874 1861 for i in xrange(total_files):
1875 1862 # XXX doesn't support '\n' or '\r' in filenames
1876 1863 l = fp.readline()
1877 1864 try:
1878 1865 name, size = l.split('\0', 1)
1879 1866 size = int(size)
1880 1867 except (ValueError, TypeError):
1881 1868 raise error.ResponseError(
1882 1869 _('Unexpected response from remote server:'), l)
1883 1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1884 1871 # for backwards compat, name was partially encoded
1885 1872 ofp = self.sopener(store.decodedir(name), 'w')
1886 1873 for chunk in util.filechunkiter(fp, limit=size):
1887 1874 ofp.write(chunk)
1888 1875 ofp.close()
1889 1876 elapsed = time.time() - start
1890 1877 if elapsed <= 0:
1891 1878 elapsed = 0.001
1892 1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1893 1880 (util.bytecount(total_bytes), elapsed,
1894 1881 util.bytecount(total_bytes / elapsed)))
1895 1882
1896 1883 # new requirements = old non-format requirements + new format-related
1897 1884 # requirements from the streamed-in repository
1898 1885 requirements.update(set(self.requirements) - self.supportedformats)
1899 1886 self._applyrequirements(requirements)
1900 1887 self._writerequirements()
1901 1888
1902 1889 self.invalidate()
1903 1890 return len(self.heads()) + 1
1904 1891
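For reference, the wire format consumed by stream_in above is: a numeric status line, a 'total_files total_bytes' line, then one 'name\0size' header per file followed by exactly size bytes of data. A minimal standalone parser sketch (fp is any file-like object; error handling trimmed):

    def parsestream(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused the stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for unused in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))
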
1905 1892 def clone(self, remote, heads=[], stream=False):
1906 1893 '''clone remote repository.
1907 1894
1908 1895 keyword arguments:
1909 1896 heads: list of revs to clone (forces use of pull)
1910 1897 stream: use streaming clone if possible'''
1911 1898
1912 1899 # now, all clients that can request uncompressed clones can
1913 1900 # read repo formats supported by all servers that can serve
1914 1901 # them.
1915 1902
1916 1903 # if revlog format changes, client will have to check version
1917 1904 # and format flags on "stream" capability, and use
1918 1905 # uncompressed only if compatible.
1919 1906
1920 1907 if stream and not heads:
1921 1908 # 'stream' means remote revlog format is revlogv1 only
1922 1909 if remote.capable('stream'):
1923 1910 return self.stream_in(remote, set(('revlogv1',)))
1924 1911 # otherwise, 'streamreqs' contains the remote revlog format
1925 1912 streamreqs = remote.capable('streamreqs')
1926 1913 if streamreqs:
1927 1914 streamreqs = set(streamreqs.split(','))
1928 1915 # if we support it, stream in and adjust our requirements
1929 1916 if not streamreqs - self.supportedformats:
1930 1917 return self.stream_in(remote, streamreqs)
1931 1918 return self.pull(remote, heads)
1932 1919
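A worked example of the streaming-clone negotiation above (the capability strings are hypothetical):

    # a server may advertise either of:
    #   'stream'                           -> legacy, revlogv1 only
    #   'streamreqs=revlogv1,parentdelta'  -> explicit format list
    supportedformats = set(('revlogv1', 'parentdelta'))
    streamreqs = set('revlogv1,parentdelta'.split(','))
    if not streamreqs - supportedformats:
        pass  # every streamed format is supported: stream in and adopt them
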
1933 1920 def pushkey(self, namespace, key, old, new):
1934 1921 return pushkey.push(self, namespace, key, old, new)
1935 1922
1936 1923 def listkeys(self, namespace):
1937 1924 return pushkey.list(self, namespace)
1938 1925
1939 1926 # used to avoid circular references so destructors work
1940 1927 def aftertrans(files):
1941 1928 renamefiles = [tuple(t) for t in files]
1942 1929 def a():
1943 1930 for src, dest in renamefiles:
1944 1931 util.rename(src, dest)
1945 1932 return a
1946 1933
1947 1934 def instance(ui, path, create):
1948 1935 return localrepository(ui, util.drop_scheme('file', path), create)
1949 1936
1950 1937 def islocal(path):
1951 1938 return True
@@ -1,891 +1,886 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
9 9 import stat, subprocess, tarfile
10 10 from i18n import _
11 11 import config, util, node, error, cmdutil
12 12 hg = None
13 13
14 14 nullstate = ('', '', 'empty')
15 15
16
17 def substate(ctx):
18 rev = {}
19 if '.hgsubstate' in ctx:
20 try:
21 for l in ctx['.hgsubstate'].data().splitlines():
22 revision, path = l.split(" ", 1)
23 rev[path] = revision
24 except IOError, err:
25 if err.errno != errno.ENOENT:
26 raise
27 return rev
28
29 16 def state(ctx, ui):
30 17 """return a state dict, mapping subrepo paths configured in .hgsub
31 18 to tuple: (source from .hgsub, revision from .hgsubstate, kind
32 19 (key in types dict))
33 20 """
34 21 p = config.config()
35 22 def read(f, sections=None, remap=None):
36 23 if f in ctx:
37 24 try:
38 25 data = ctx[f].data()
39 26 except IOError, err:
40 27 if err.errno != errno.ENOENT:
41 28 raise
42 29 # handle missing subrepo spec files as removed
43 30 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
44 31 return
45 32 p.parse(f, data, sections, remap, read)
46 33 else:
47 34 raise util.Abort(_("subrepo spec file %s not found") % f)
48 35
49 36 if '.hgsub' in ctx:
50 37 read('.hgsub')
51 38
52 39 for path, src in ui.configitems('subpaths'):
53 40 p.set('subpaths', path, src, ui.configsource('subpaths', path))
54 41
55 rev = substate(ctx)
42 rev = {}
43 if '.hgsubstate' in ctx:
44 try:
45 for l in ctx['.hgsubstate'].data().splitlines():
46 revision, path = l.split(" ", 1)
47 rev[path] = revision
48 except IOError, err:
49 if err.errno != errno.ENOENT:
50 raise
56 51
57 52 state = {}
58 53 for path, src in p[''].items():
59 54 kind = 'hg'
60 55 if src.startswith('['):
61 56 if ']' not in src:
62 57 raise util.Abort(_('missing ] in subrepo source'))
63 58 kind, src = src.split(']', 1)
64 59 kind = kind[1:]
65 60
66 61 for pattern, repl in p.items('subpaths'):
67 62 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
68 63 # does a string decode.
69 64 repl = repl.encode('string-escape')
70 65 # However, we still want to allow back references to go
71 66 # through unharmed, so we turn r'\\1' into r'\1'. Again,
72 67 # extra escapes are needed because re.sub string decodes.
73 68 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
74 69 try:
75 70 src = re.sub(pattern, repl, src, 1)
76 71 except re.error, e:
77 72 raise util.Abort(_("bad subrepository pattern in %s: %s")
78 73 % (p.source('subpaths', pattern), e))
79 74
80 75 state[path] = (src.strip(), rev.get(path, ''), kind)
81 76
82 77 return state
83 78
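A hand-worked example of the mapping state() builds above (paths, URLs and hashes are made up):

    # .hgsub:
    #   lib/backend = http://example.com/backend
    #   vendor/ui = [git]../ui.git
    # .hgsubstate, one 'revision path' line per subrepo:
    #   1111111111111111111111111111111111111111 lib/backend
    #   2222222222222222222222222222222222222222 vendor/ui
    expected = {
        'lib/backend': ('http://example.com/backend',
                        '1111111111111111111111111111111111111111', 'hg'),
        'vendor/ui': ('../ui.git',
                      '2222222222222222222222222222222222222222', 'git'),
    }
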
84 79 def writestate(repo, state):
85 80 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
86 81 repo.wwrite('.hgsubstate',
87 82 ''.join(['%s %s\n' % (state[s][1], s)
88 83 for s in sorted(state)]), '')
89 84
90 85 def submerge(repo, wctx, mctx, actx):
91 86 """delegated from merge.applyupdates: merging of .hgsubstate file
92 87 in working context, merging context and ancestor context"""
93 88 if mctx == actx: # backwards?
94 89 actx = wctx.p1()
95 90 s1 = wctx.substate
96 91 s2 = mctx.substate
97 92 sa = actx.substate
98 93 sm = {}
99 94
100 95 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
101 96
102 97 def debug(s, msg, r=""):
103 98 if r:
104 99 r = "%s:%s:%s" % r
105 100 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
106 101
107 102 for s, l in s1.items():
108 103 a = sa.get(s, nullstate)
109 104 ld = l # local state with possible dirty flag for compares
110 105 if wctx.sub(s).dirty():
111 106 ld = (l[0], l[1] + "+")
112 107 if wctx == actx: # overwrite
113 108 a = ld
114 109
115 110 if s in s2:
116 111 r = s2[s]
117 112 if ld == r or r == a: # no change or local is newer
118 113 sm[s] = l
119 114 continue
120 115 elif ld == a: # other side changed
121 116 debug(s, "other changed, get", r)
122 117 wctx.sub(s).get(r)
123 118 sm[s] = r
124 119 elif ld[0] != r[0]: # sources differ
125 120 if repo.ui.promptchoice(
126 121 _(' subrepository sources for %s differ\n'
127 122 'use (l)ocal source (%s) or (r)emote source (%s)?')
128 123 % (s, l[0], r[0]),
129 124 (_('&Local'), _('&Remote')), 0):
130 125 debug(s, "prompt changed, get", r)
131 126 wctx.sub(s).get(r)
132 127 sm[s] = r
133 128 elif ld[1] == a[1]: # local side is unchanged
134 129 debug(s, "other side changed, get", r)
135 130 wctx.sub(s).get(r)
136 131 sm[s] = r
137 132 else:
138 133 debug(s, "both sides changed, merge with", r)
139 134 wctx.sub(s).merge(r)
140 135 sm[s] = l
141 136 elif ld == a: # remote removed, local unchanged
142 137 debug(s, "remote removed, remove")
143 138 wctx.sub(s).remove()
144 139 else:
145 140 if repo.ui.promptchoice(
146 141 _(' local changed subrepository %s which remote removed\n'
147 142 'use (c)hanged version or (d)elete?') % s,
148 143 (_('&Changed'), _('&Delete')), 0):
149 144 debug(s, "prompt remove")
150 145 wctx.sub(s).remove()
151 146
152 147 for s, r in s2.items():
153 148 if s in s1:
154 149 continue
155 150 elif s not in sa:
156 151 debug(s, "remote added, get", r)
157 152 mctx.sub(s).get(r)
158 153 sm[s] = r
159 154 elif r != sa[s]:
160 155 if repo.ui.promptchoice(
161 156 _(' remote changed subrepository %s which local removed\n'
162 157 'use (c)hanged version or (d)elete?') % s,
163 158 (_('&Changed'), _('&Delete')), 0) == 0:
164 159 debug(s, "prompt recreate", r)
165 160 wctx.sub(s).get(r)
166 161 sm[s] = r
167 162
168 163 # record merged .hgsubstate
169 164 writestate(repo, sm)
170 165
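The branching in submerge above reduces to a small three-way decision table; a summary sketch (l = local state, r = remote state, a = ancestor state, '+' marks a dirty working copy):

    # subrepo present on both sides:
    #   l == r or r == a        -> keep local
    #   l == a                  -> get remote (only the other side changed)
    #   source(l) != source(r)  -> prompt: local or remote source
    #   rev(l) == rev(a)        -> get remote (local revision unchanged)
    #   otherwise               -> merge the subrepo
    # only local:  l == a -> remove; otherwise prompt keep/delete
    # only remote: absent in a -> get; r != a -> prompt recreate/delete
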
171 166 def reporelpath(repo):
172 167 """return path to this (sub)repo as seen from outermost repo"""
173 168 parent = repo
174 169 while hasattr(parent, '_subparent'):
175 170 parent = parent._subparent
176 171 return repo.root[len(parent.root)+1:]
177 172
178 173 def subrelpath(sub):
179 174 """return path to this subrepo as seen from outermost repo"""
180 175 if not hasattr(sub, '_repo'):
181 176 return sub._path
182 177 return reporelpath(sub._repo)
183 178
184 179 def _abssource(repo, push=False, abort=True):
185 180 """return pull/push path of repo - either based on parent repo .hgsub info
186 181 or on the top repo config. Abort or return None if no source found."""
187 182 if hasattr(repo, '_subparent'):
188 183 source = repo._subsource
189 184 if source.startswith('/') or '://' in source:
190 185 return source
191 186 parent = _abssource(repo._subparent, push, abort=False)
192 187 if parent:
193 188 if '://' in parent:
194 189 if parent[-1] == '/':
195 190 parent = parent[:-1]
196 191 r = urlparse.urlparse(parent + '/' + source)
197 192 r = urlparse.urlunparse((r[0], r[1],
198 193 posixpath.normpath(r[2]),
199 194 r[3], r[4], r[5]))
200 195 return r
201 196 else: # plain file system path
202 197 return posixpath.normpath(os.path.join(parent, repo._subsource))
203 198 else: # recursion reached top repo
204 199 if hasattr(repo, '_subtoppath'):
205 200 return repo._subtoppath
206 201 if push and repo.ui.config('paths', 'default-push'):
207 202 return repo.ui.config('paths', 'default-push')
208 203 if repo.ui.config('paths', 'default'):
209 204 return repo.ui.config('paths', 'default')
210 205 if abort:
211 206 raise util.Abort(_("default path for subrepository %s not found") %
212 207 reporelpath(repo))
213 208
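A standalone sketch of the parent/source resolution performed by _abssource above (joinsource is a hypothetical helper; the example URL is made up):

    import posixpath, urlparse

    def joinsource(parent, source):
        if source.startswith('/') or '://' in source:
            return source  # absolute sources win outright
        if '://' in parent:
            r = urlparse.urlparse(parent.rstrip('/') + '/' + source)
            return urlparse.urlunparse((r[0], r[1],
                                        posixpath.normpath(r[2]),
                                        r[3], r[4], r[5]))
        return posixpath.normpath(posixpath.join(parent, source))

    # joinsource('http://example.com/repo', '../other')
    #   -> 'http://example.com/other'
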
214 209 def itersubrepos(ctx1, ctx2):
215 210 """find subrepos in ctx1 or ctx2"""
216 211 # Create a (subpath, ctx) mapping where we prefer subpaths from
217 212 # ctx1. The subpaths from ctx2 are important when the .hgsub file
218 213 # has been modified (in ctx2) but not yet committed (in ctx1).
219 214 subpaths = dict.fromkeys(ctx2.substate, ctx2)
220 215 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
221 216 for subpath, ctx in sorted(subpaths.iteritems()):
222 217 yield subpath, ctx.sub(subpath)
223 218
224 219 def subrepo(ctx, path):
225 220 """return instance of the right subrepo class for subrepo in path"""
226 221 # subrepo inherently violates our import layering rules
227 222 # because it wants to make repo objects from deep inside the stack
228 223 # so we manually delay the circular imports to not break
229 224 # scripts that don't use our demand-loading
230 225 global hg
231 226 import hg as h
232 227 hg = h
233 228
234 229 util.path_auditor(ctx._repo.root)(path)
235 230 state = ctx.substate.get(path, nullstate)
236 231 if state[2] not in types:
237 232 raise util.Abort(_('unknown subrepo type %s') % state[2])
238 233 return types[state[2]](ctx, path, state[:2])
239 234
240 235 # subrepo classes need to implement the following abstract class:
241 236
242 237 class abstractsubrepo(object):
243 238
244 239 def dirty(self):
245 240 """returns true if the dirstate of the subrepo does not match
246 241 current stored state
247 242 """
248 243 raise NotImplementedError
249 244
250 245 def checknested(self, path):
251 246 """check if path is a subrepository within this repository"""
252 247 return False
253 248
254 249 def commit(self, text, user, date):
255 250 """commit the current changes to the subrepo with the given
256 251 log message. Use given user and date if possible. Return the
257 252 new state of the subrepo.
258 253 """
259 254 raise NotImplementedError
260 255
261 256 def remove(self):
262 257 """remove the subrepo
263 258
264 259 (should verify the dirstate is not dirty first)
265 260 """
266 261 raise NotImplementedError
267 262
268 263 def get(self, state):
269 264 """run whatever commands are needed to put the subrepo into
270 265 this state
271 266 """
272 267 raise NotImplementedError
273 268
274 269 def merge(self, state):
275 270 """merge currently-saved state with the new state."""
276 271 raise NotImplementedError
277 272
278 273 def push(self, force):
279 274 """perform whatever action is analogous to 'hg push'
280 275
281 276 This may be a no-op on some systems.
282 277 """
283 278 raise NotImplementedError
284 279
285 280 def add(self, ui, match, dryrun, prefix):
286 281 return []
287 282
288 283 def status(self, rev2, **opts):
289 284 return [], [], [], [], [], [], []
290 285
291 286 def diff(self, diffopts, node2, match, prefix, **opts):
292 287 pass
293 288
294 289 def outgoing(self, ui, dest, opts):
295 290 return 1
296 291
297 292 def incoming(self, ui, source, opts):
298 293 return 1
299 294
300 295 def files(self):
301 296 """return filename iterator"""
302 297 raise NotImplementedError
303 298
304 299 def filedata(self, name):
305 300 """return file data"""
306 301 raise NotImplementedError
307 302
308 303 def fileflags(self, name):
309 304 """return file flags"""
310 305 return ''
311 306
312 307 def archive(self, ui, archiver, prefix):
313 308 files = self.files()
314 309 total = len(files)
315 310 relpath = subrelpath(self)
316 311 ui.progress(_('archiving (%s)') % relpath, 0,
317 312 unit=_('files'), total=total)
318 313 for i, name in enumerate(files):
319 314 flags = self.fileflags(name)
320 315 mode = 'x' in flags and 0755 or 0644
321 316 symlink = 'l' in flags
322 317 archiver.addfile(os.path.join(prefix, self._path, name),
323 318 mode, symlink, self.filedata(name))
324 319 ui.progress(_('archiving (%s)') % relpath, i + 1,
325 320 unit=_('files'), total=total)
326 321 ui.progress(_('archiving (%s)') % relpath, None)
327 322
328 323
329 324 class hgsubrepo(abstractsubrepo):
330 325 def __init__(self, ctx, path, state):
331 326 self._path = path
332 327 self._state = state
333 328 r = ctx._repo
334 329 root = r.wjoin(path)
335 330 create = False
336 331 if not os.path.exists(os.path.join(root, '.hg')):
337 332 create = True
338 333 util.makedirs(root)
339 334 self._repo = hg.repository(r.ui, root, create=create)
340 335 self._repo._subparent = r
341 336 self._repo._subsource = state[0]
342 337
343 338 if create:
344 339 fp = self._repo.opener("hgrc", "w", text=True)
345 340 fp.write('[paths]\n')
346 341
347 342 def addpathconfig(key, value):
348 343 if value:
349 344 fp.write('%s = %s\n' % (key, value))
350 345 self._repo.ui.setconfig('paths', key, value)
351 346
352 347 defpath = _abssource(self._repo, abort=False)
353 348 defpushpath = _abssource(self._repo, True, abort=False)
354 349 addpathconfig('default', defpath)
355 350 if defpath != defpushpath:
356 351 addpathconfig('default-push', defpushpath)
357 352 fp.close()
358 353
359 354 def add(self, ui, match, dryrun, prefix):
360 355 return cmdutil.add(ui, self._repo, match, dryrun, True,
361 356 os.path.join(prefix, self._path))
362 357
363 358 def status(self, rev2, **opts):
364 359 try:
365 360 rev1 = self._state[1]
366 361 ctx1 = self._repo[rev1]
367 362 ctx2 = self._repo[rev2]
368 363 return self._repo.status(ctx1, ctx2, **opts)
369 364 except error.RepoLookupError, inst:
370 365 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
371 366 % (inst, subrelpath(self)))
372 367 return [], [], [], [], [], [], []
373 368
374 369 def diff(self, diffopts, node2, match, prefix, **opts):
375 370 try:
376 371 node1 = node.bin(self._state[1])
377 372 # We currently expect node2 to come from substate and be
378 373 # in hex format
379 374 if node2 is not None:
380 375 node2 = node.bin(node2)
381 376 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
382 377 node1, node2, match,
383 378 prefix=os.path.join(prefix, self._path),
384 379 listsubrepos=True, **opts)
385 380 except error.RepoLookupError, inst:
386 381 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
387 382 % (inst, subrelpath(self)))
388 383
389 384 def archive(self, ui, archiver, prefix):
390 385 abstractsubrepo.archive(self, ui, archiver, prefix)
391 386
392 387 rev = self._state[1]
393 388 ctx = self._repo[rev]
394 389 for subpath in ctx.substate:
395 390 s = subrepo(ctx, subpath)
396 391 s.archive(ui, archiver, os.path.join(prefix, self._path))
397 392
398 393 def dirty(self):
399 394 r = self._state[1]
400 395 if r == '':
401 396 return True
402 397 w = self._repo[None]
403 398 if w.p1() != self._repo[r]: # version checked out changed
404 399 return True
405 400 return w.dirty() # working directory changed
406 401
407 402 def checknested(self, path):
408 403 return self._repo._checknested(self._repo.wjoin(path))
409 404
410 405 def commit(self, text, user, date):
411 406 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
412 407 n = self._repo.commit(text, user, date)
413 408 if not n:
414 409 return self._repo['.'].hex() # different version checked out
415 410 return node.hex(n)
416 411
417 412 def remove(self):
418 413 # we can't fully delete the repository as it may contain
419 414 # local-only history
420 415 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
421 416 hg.clean(self._repo, node.nullid, False)
422 417
423 418 def _get(self, state):
424 419 source, revision, kind = state
425 420 try:
426 421 self._repo.lookup(revision)
427 422 except error.RepoError:
428 423 self._repo._subsource = source
429 424 srcurl = _abssource(self._repo)
430 425 self._repo.ui.status(_('pulling subrepo %s from %s\n')
431 426 % (subrelpath(self), srcurl))
432 427 other = hg.repository(self._repo.ui, srcurl)
433 428 self._repo.pull(other)
434 429
435 430 def get(self, state):
436 431 self._get(state)
437 432 source, revision, kind = state
438 433 self._repo.ui.debug("getting subrepo %s\n" % self._path)
439 434 hg.clean(self._repo, revision, False)
440 435
441 436 def merge(self, state):
442 437 self._get(state)
443 438 cur = self._repo['.']
444 439 dst = self._repo[state[1]]
445 440 anc = dst.ancestor(cur)
446 441 if anc == cur:
447 442 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
448 443 hg.update(self._repo, state[1])
449 444 elif anc == dst:
450 445 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
451 446 else:
452 447 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
453 448 hg.merge(self._repo, state[1], remind=False)
454 449
455 450 def push(self, force):
456 451 # push subrepos depth-first for coherent ordering
457 452 c = self._repo['']
458 453 subs = c.substate # only repos that are committed
459 454 for s in sorted(subs):
460 455 if not c.sub(s).push(force):
461 456 return False
462 457
463 458 dsturl = _abssource(self._repo, True)
464 459 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
465 460 (subrelpath(self), dsturl))
466 461 other = hg.repository(self._repo.ui, dsturl)
467 462 return self._repo.push(other, force)
468 463
469 464 def outgoing(self, ui, dest, opts):
470 465 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
471 466
472 467 def incoming(self, ui, source, opts):
473 468 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
474 469
475 470 def files(self):
476 471 rev = self._state[1]
477 472 ctx = self._repo[rev]
478 473 return ctx.manifest()
479 474
480 475 def filedata(self, name):
481 476 rev = self._state[1]
482 477 return self._repo[rev][name].data()
483 478
484 479 def fileflags(self, name):
485 480 rev = self._state[1]
486 481 ctx = self._repo[rev]
487 482 return ctx.flags(name)
488 483
489 484
490 485 class svnsubrepo(abstractsubrepo):
491 486 def __init__(self, ctx, path, state):
492 487 self._path = path
493 488 self._state = state
494 489 self._ctx = ctx
495 490 self._ui = ctx._repo.ui
496 491
497 492 def _svncommand(self, commands, filename=''):
498 493 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
499 494 cmd = ['svn'] + commands + [path]
500 495 env = dict(os.environ)
501 496 # Avoid localized output, preserve current locale for everything else.
502 497 env['LC_MESSAGES'] = 'C'
503 498 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
504 499 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
505 500 universal_newlines=True, env=env)
506 501 stdout, stderr = p.communicate()
507 502 stderr = stderr.strip()
508 503 if stderr:
509 504 raise util.Abort(stderr)
510 505 return stdout
511 506
512 507 def _wcrev(self):
513 508 output = self._svncommand(['info', '--xml'])
514 509 doc = xml.dom.minidom.parseString(output)
515 510 entries = doc.getElementsByTagName('entry')
516 511 if not entries:
517 512 return '0'
518 513 return str(entries[0].getAttribute('revision')) or '0'
519 514
520 515 def _wcchanged(self):
521 516 """Return (changes, extchanges) where changes is True
522 517 if the working directory was changed, and extchanges is
523 518 True if any of these changes concern an external entry.
524 519 """
525 520 output = self._svncommand(['status', '--xml'])
526 521 externals, changes = [], []
527 522 doc = xml.dom.minidom.parseString(output)
528 523 for e in doc.getElementsByTagName('entry'):
529 524 s = e.getElementsByTagName('wc-status')
530 525 if not s:
531 526 continue
532 527 item = s[0].getAttribute('item')
533 528 props = s[0].getAttribute('props')
534 529 path = e.getAttribute('path')
535 530 if item == 'external':
536 531 externals.append(path)
537 532 if (item not in ('', 'normal', 'unversioned', 'external')
538 533 or props not in ('', 'none')):
539 534 changes.append(path)
540 535 for path in changes:
541 536 for ext in externals:
542 537 if path == ext or path.startswith(ext + os.sep):
543 538 return True, True
544 539 return bool(changes), False
545 540
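A hand-worked sample of the 'svn status --xml' output parsed above (paths are made up):

    import xml.dom.minidom
    sample = ('<?xml version="1.0"?><status><target path=".">'
              '<entry path="vendor">'
              '<wc-status item="external" props="none"/></entry>'
              '<entry path="vendor/lib.c">'
              '<wc-status item="modified" props="none"/></entry>'
              '</target></status>')
    xml.dom.minidom.parseString(sample)  # parses cleanly
    # the loop above yields externals == ['vendor'] and
    # changes == ['vendor/lib.c']; since 'vendor/lib.c' starts with
    # 'vendor' + os.sep, _wcchanged() returns (True, True).
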
546 541 def dirty(self):
547 542 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
548 543 return False
549 544 return True
550 545
551 546 def commit(self, text, user, date):
552 547 # user and date are out of our hands since svn is centralized
553 548 changed, extchanged = self._wcchanged()
554 549 if not changed:
555 550 return self._wcrev()
556 551 if extchanged:
557 552 # Do not try to commit externals
558 553 raise util.Abort(_('cannot commit svn externals'))
559 554 commitinfo = self._svncommand(['commit', '-m', text])
560 555 self._ui.status(commitinfo)
561 556 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
562 557 if not newrev:
563 558 raise util.Abort(commitinfo.splitlines()[-1])
564 559 newrev = newrev.groups()[0]
565 560 self._ui.status(self._svncommand(['update', '-r', newrev]))
566 561 return newrev
567 562
568 563 def remove(self):
569 564 if self.dirty():
570 565 self._ui.warn(_('not removing repo %s because '
571 566 'it has changes.\n') % self._path)
572 567 return
573 568 self._ui.note(_('removing subrepo %s\n') % self._path)
574 569
575 570 def onerror(function, path, excinfo):
576 571 if function is not os.remove:
577 572 raise
578 573 # read-only files cannot be unlinked under Windows
579 574 s = os.stat(path)
580 575 if (s.st_mode & stat.S_IWRITE) != 0:
581 576 raise
582 577 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
583 578 os.remove(path)
584 579
585 580 path = self._ctx._repo.wjoin(self._path)
586 581 shutil.rmtree(path, onerror=onerror)
587 582 try:
588 583 os.removedirs(os.path.dirname(path))
589 584 except OSError:
590 585 pass
591 586
592 587 def get(self, state):
593 588 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
594 589 if not re.search('Checked out revision [0-9]+.', status):
595 590 raise util.Abort(status.splitlines()[-1])
596 591 self._ui.status(status)
597 592
598 593 def merge(self, state):
599 594 old = int(self._state[1])
600 595 new = int(state[1])
601 596 if new > old:
602 597 self.get(state)
603 598
604 599 def push(self, force):
605 600 # push is a no-op for SVN
606 601 return True
607 602
608 603 def files(self):
609 604 output = self._svncommand(['list'])
610 605 # This works because svn forbids \n in filenames.
611 606 return output.splitlines()
612 607
613 608 def filedata(self, name):
614 609 return self._svncommand(['cat'], name)
615 610
616 611
617 612 class gitsubrepo(abstractsubrepo):
618 613 def __init__(self, ctx, path, state):
619 614 # TODO add git version check.
620 615 self._state = state
621 616 self._ctx = ctx
622 617 self._relpath = path
623 618 self._path = ctx._repo.wjoin(path)
624 619 self._ui = ctx._repo.ui
625 620
626 621 def _gitcommand(self, commands, env=None, stream=False):
627 622 return self._gitdir(commands, env=env, stream=stream)[0]
628 623
629 624 def _gitdir(self, commands, env=None, stream=False):
630 625 return self._gitnodir(commands, env=env, stream=stream, cwd=self._path)
631 626
632 627 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
633 628 """Calls the git command
634 629
635 630 The method tries to call the git command. Versions prior to 1.6.0
636 631 are not supported and will very probably fail.
637 632 """
638 633 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
639 634 # unless ui.quiet is set, print git's stderr,
640 635 # which is mostly progress and useful info
641 636 errpipe = None
642 637 if self._ui.quiet:
643 638 errpipe = open(os.devnull, 'w')
644 639 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
645 640 close_fds=util.closefds,
646 641 stdout=subprocess.PIPE, stderr=errpipe)
647 642 if stream:
648 643 return p.stdout, None
649 644
650 645 retdata = p.stdout.read().strip()
651 646 # wait for the child to exit to avoid race condition.
652 647 p.wait()
653 648
654 649 if p.returncode != 0 and p.returncode != 1:
655 650 # there are certain error codes that are ok
656 651 command = commands[0]
657 652 if command in ('cat-file', 'symbolic-ref'):
658 653 return retdata, p.returncode
659 654 # for all others, abort
660 655 raise util.Abort('git %s error %d in %s' %
661 656 (command, p.returncode, self._relpath))
662 657
663 658 return retdata, p.returncode
664 659
665 660 def _gitstate(self):
666 661 return self._gitcommand(['rev-parse', 'HEAD'])
667 662
668 663 def _gitcurrentbranch(self):
669 664 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
670 665 if err:
671 666 current = None
672 667 return current
673 668
674 669 def _githavelocally(self, revision):
675 670 out, code = self._gitdir(['cat-file', '-e', revision])
676 671 return code == 0
677 672
678 673 def _gitisancestor(self, r1, r2):
679 674 base = self._gitcommand(['merge-base', r1, r2])
680 675 return base == r1
681 676
682 677 def _gitbranchmap(self):
683 678 '''returns 3 things:
684 679 a map from git branch to revision
685 680 a map from revision to branches
686 681 a map from remote branch to local tracking branch'''
687 682 branch2rev = {}
688 683 rev2branch = {}
689 684 tracking = {}
690 685 out = self._gitcommand(['for-each-ref', '--format',
691 686 '%(objectname) %(refname) %(upstream) end'])
692 687 for line in out.split('\n'):
693 688 revision, ref, upstream = line.split(' ')[:3]
694 689 if ref.startswith('refs/tags/'):
695 690 continue
696 691 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
697 692 continue # ignore remote/HEAD redirects
698 693 branch2rev[ref] = revision
699 694 rev2branch.setdefault(revision, []).append(ref)
700 695 if upstream:
701 696 # assumes no more than one local tracking branch for a remote
702 697 tracking[upstream] = ref
703 698 return branch2rev, rev2branch, tracking
704 699
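A worked example of the for-each-ref parsing above (hashes and refs are made up; the literal 'end' keeps split(' ') well-defined when %(upstream) is empty):

    out = ('1111111111111111111111111111111111111111 refs/heads/master '
           'refs/remotes/origin/master end\n'
           '2222222222222222222222222222222222222222 '
           'refs/remotes/origin/master  end')
    rows = [line.split(' ')[:3] for line in out.split('\n')]
    # rows[0]: refs/heads/master tracks refs/remotes/origin/master
    # rows[1]: upstream == '' (an empty %(upstream) leaves a double space)
    # the resulting maps:
    #   branch2rev: each ref -> its hash
    #   rev2branch: each hash -> [its refs]
    #   tracking:   {'refs/remotes/origin/master': 'refs/heads/master'}
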
705 700 def _fetch(self, source, revision):
706 701 if not os.path.exists('%s/.git' % self._path):
707 702 self._ui.status(_('cloning subrepo %s\n') % self._relpath)
708 703 self._gitnodir(['clone', source, self._path])
709 704 if self._githavelocally(revision):
710 705 return
711 706 self._ui.status(_('pulling subrepo %s\n') % self._relpath)
712 707 # first try from origin
713 708 self._gitcommand(['fetch'])
714 709 if self._githavelocally(revision):
715 710 return
716 711 # then try from known subrepo source
717 712 self._gitcommand(['fetch', source])
718 713 if not self._githavelocally(revision):
719 714 raise util.Abort(_("revision %s does not exist in subrepo %s") %
720 715 (revision, self._path))
721 716
722 717 def dirty(self):
723 718 if self._state[1] != self._gitstate(): # version checked out changed?
724 719 return True
725 720 # check for staged changes or modified files; ignore untracked files
726 721 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
727 722 return code == 1
728 723
729 724 def get(self, state):
730 725 source, revision, kind = state
731 726 self._fetch(source, revision)
732 727 # if the repo was set to be bare, unbare it
733 728 if self._gitcommand(['config', '--bool', 'core.bare']) == 'true':
734 729 self._gitcommand(['config', 'core.bare', 'false'])
735 730 if self._gitstate() == revision:
736 731 self._gitcommand(['reset', '--hard', 'HEAD'])
737 732 return
738 733 elif self._gitstate() == revision:
739 734 return
740 735 branch2rev, rev2branch, tracking = self._gitbranchmap()
741 736
742 737 def rawcheckout():
743 738 # no branch to checkout, check it out with no branch
744 739 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
745 740 self._relpath)
746 741 self._ui.warn(_('check out a git branch if you intend '
747 742 'to make changes\n'))
748 743 self._gitcommand(['checkout', '-q', revision])
749 744
750 745 if revision not in rev2branch:
751 746 rawcheckout()
752 747 return
753 748 branches = rev2branch[revision]
754 749 firstlocalbranch = None
755 750 for b in branches:
756 751 if b == 'refs/heads/master':
757 752 # master trumps all other branches
758 753 self._gitcommand(['checkout', 'refs/heads/master'])
759 754 return
760 755 if not firstlocalbranch and not b.startswith('refs/remotes/'):
761 756 firstlocalbranch = b
762 757 if firstlocalbranch:
763 758 self._gitcommand(['checkout', firstlocalbranch])
764 759 return
765 760
766 761 # choose a remote branch already tracked if possible
767 762 remote = branches[0]
768 763 if remote not in tracking:
769 764 for b in branches:
770 765 if b in tracking:
771 766 remote = b
772 767 break
773 768
774 769 if remote not in tracking:
775 770 # create a new local tracking branch
776 771 local = remote.split('/', 2)[2]
777 772 self._gitcommand(['checkout', '-b', local, remote])
778 773 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
779 774 # When updating to a tracked remote branch,
780 775 # if the local tracking branch is downstream of it,
781 776 # a normal `git pull` would have performed a "fast-forward merge"
782 777 # which is equivalent to updating the local branch to the remote.
783 778 # Since we are only looking at branching at update, we need to
784 779 # detect this situation and perform this action lazily.
785 780 if tracking[remote] != self._gitcurrentbranch():
786 781 self._gitcommand(['checkout', tracking[remote]])
787 782 self._gitcommand(['merge', '--ff', remote])
788 783 else:
789 784 # a real merge would be required, just checkout the revision
790 785 rawcheckout()
791 786
792 787 def commit(self, text, user, date):
793 788 cmd = ['commit', '-a', '-m', text]
794 789 env = os.environ.copy()
795 790 if user:
796 791 cmd += ['--author', user]
797 792 if date:
798 793 # git's date parser silently ignores when seconds < 1e9
799 794 # convert to ISO8601
800 795 env['GIT_AUTHOR_DATE'] = util.datestr(date,
801 796 '%Y-%m-%dT%H:%M:%S %1%2')
802 797 self._gitcommand(cmd, env=env)
803 798 # make sure commit works otherwise HEAD might not exist under certain
804 799 # circumstances
805 800 return self._gitstate()
806 801
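A standalone sketch of the date conversion done above, using plain time instead of Mercurial's util.datestr (isodate is a hypothetical helper; hg date tuples are (unixtime, offset), offset in seconds west of UTC):

    import time

    def isodate(when, tzoffset):
        hh, mm = divmod(abs(tzoffset) // 60, 60)
        sign = tzoffset > 0 and '-' or '+'
        return (time.strftime('%Y-%m-%dT%H:%M:%S',
                              time.gmtime(when - tzoffset))
                + ' %s%02d%02d' % (sign, hh, mm))

    # isodate(0, -3600) -> '1970-01-01T01:00:00 +0100'
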
807 802 def merge(self, state):
808 803 source, revision, kind = state
809 804 self._fetch(source, revision)
810 805 base = self._gitcommand(['merge-base', revision, self._state[1]])
811 806 if base == revision:
812 807 self.get(state) # fast forward merge
813 808 elif base != self._state[1]:
814 809 self._gitcommand(['merge', '--no-commit', revision])
815 810
816 811 def push(self, force):
817 812 # if a branch in origin contains the revision, nothing to do
818 813 branch2rev, rev2branch, tracking = self._gitbranchmap()
819 814 if self._state[1] in rev2branch:
820 815 for b in rev2branch[self._state[1]]:
821 816 if b.startswith('refs/remotes/origin/'):
822 817 return True
823 818 for b, revision in branch2rev.iteritems():
824 819 if b.startswith('refs/remotes/origin/'):
825 820 if self._gitisancestor(self._state[1], revision):
826 821 return True
827 822 # otherwise, try to push the currently checked out branch
828 823 cmd = ['push']
829 824 if force:
830 825 cmd.append('--force')
831 826
832 827 current = self._gitcurrentbranch()
833 828 if current:
834 829 # determine if the current branch is even useful
835 830 if not self._gitisancestor(self._state[1], current):
836 831 self._ui.warn(_('unrelated git branch checked out '
837 832 'in subrepo %s\n') % self._relpath)
838 833 return False
839 834 self._ui.status(_('pushing branch %s of subrepo %s\n') %
840 835 (current.split('/', 2)[2], self._relpath))
841 836 self._gitcommand(cmd + ['origin', current])
842 837 return True
843 838 else:
844 839 self._ui.warn(_('no branch checked out in subrepo %s\n'
845 840 'cannot push revision %s\n') %
846 841 (self._relpath, self._state[1]))
847 842 return False
848 843
849 844 def remove(self):
850 845 if self.dirty():
851 846 self._ui.warn(_('not removing repo %s because '
852 847 'it has changes.\n') % self._path)
853 848 return
854 849 # we can't fully delete the repository as it may contain
855 850 # local-only history
856 851 self._ui.note(_('removing subrepo %s\n') % self._path)
857 852 self._gitcommand(['config', 'core.bare', 'true'])
858 853 for f in os.listdir(self._path):
859 854 if f == '.git':
860 855 continue
861 856 path = os.path.join(self._path, f)
862 857 if os.path.isdir(path) and not os.path.islink(path):
863 858 shutil.rmtree(path)
864 859 else:
865 860 os.remove(path)
866 861
867 862 def archive(self, ui, archiver, prefix):
868 863 source, revision = self._state
869 864 self._fetch(source, revision)
870 865
871 866 # Parse git's native archive command.
872 867 # This should be much faster than manually traversing the trees
873 868 # and objects with many subprocess calls.
874 869 tarstream = self._gitcommand(['archive', revision], stream=True)
875 870 tar = tarfile.open(fileobj=tarstream, mode='r|')
876 871 relpath = subrelpath(self)
877 872 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
878 873 for i, info in enumerate(tar):
879 874 archiver.addfile(os.path.join(prefix, self._relpath, info.name),
880 875 info.mode, info.issym(),
881 876 tar.extractfile(info).read())
882 877 ui.progress(_('archiving (%s)') % relpath, i + 1,
883 878 unit=_('files'))
884 879 ui.progress(_('archiving (%s)') % relpath, None)
885 880
886 881
887 882 types = {
888 883 'hg': hgsubrepo,
889 884 'svn': svnsubrepo,
890 885 'git': gitsubrepo,
891 886 }