localrepo: make requirements attribute of newly-created repos contain a set...
Andrew Pritchard
r14905:207935cd default
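The change itself is the single added line in the hunk below: on the repository-creation path, requirements was accumulated as a list, while scmutil.readrequires() already returns a set when an existing repository is opened, so self.requirements could end up as either type depending on how the repo was constructed. Converting with set(requirements) makes the attribute a set in both cases. A minimal sketch of the effect (open_existing and create_new are hypothetical helpers for illustration, not Mercurial APIs):

# Minimal sketch; the helpers below are hypothetical, not Mercurial code.
def open_existing():
    # scmutil.readrequires() returns a set for an existing repository
    return set(['revlogv1', 'store', 'fncache'])

def create_new():
    # the creation path accumulates requirements in a list...
    requirements = ['revlogv1', 'store', 'fncache']
    # ...and the added line converts it before it is stored
    return set(requirements)

assert create_new() == open_existing()  # same type and contents either way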
@@ -1,2002 +1,2003 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 23 'known', 'getbundle'))
24 24 supportedformats = set(('revlogv1', 'generaldelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=False):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 34 self.opener = scmutil.opener(self.path)
35 35 self.wopener = scmutil.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 util.makedir(self.path, notindexed=True)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener.append(
60 60 "00changelog.i",
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'generaldelta', False):
65 65 requirements.append("generaldelta")
66 requirements = set(requirements)
66 67 else:
67 68 raise error.RepoError(_("repository %s not found") % path)
68 69 elif create:
69 70 raise error.RepoError(_("repository %s already exists") % path)
70 71 else:
71 72 try:
72 73 requirements = scmutil.readrequires(self.opener, self.supported)
73 74 except IOError, inst:
74 75 if inst.errno != errno.ENOENT:
75 76 raise
76 77 requirements = set()
77 78
78 79 self.sharedpath = self.path
79 80 try:
80 81 s = os.path.realpath(self.opener.read("sharedpath"))
81 82 if not os.path.exists(s):
82 83 raise error.RepoError(
83 84 _('.hg/sharedpath points to nonexistent directory %s') % s)
84 85 self.sharedpath = s
85 86 except IOError, inst:
86 87 if inst.errno != errno.ENOENT:
87 88 raise
88 89
89 90 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
90 91 self.spath = self.store.path
91 92 self.sopener = self.store.opener
92 93 self.sjoin = self.store.join
93 94 self.opener.createmode = self.store.createmode
94 95 self._applyrequirements(requirements)
95 96 if create:
96 97 self._writerequirements()
97 98
98 99 # These two define the set of tags for this repository. _tags
99 100 # maps tag name to node; _tagtypes maps tag name to 'global' or
100 101 # 'local'. (Global tags are defined by .hgtags across all
101 102 # heads, and local tags are defined in .hg/localtags.) They
102 103 # constitute the in-memory cache of tags.
103 104 self._tags = None
104 105 self._tagtypes = None
105 106
106 107 self._branchcache = None
107 108 self._branchcachetip = None
108 109 self.nodetagscache = None
109 110 self.filterpats = {}
110 111 self._datafilters = {}
111 112 self._transref = self._lockref = self._wlockref = None
112 113
113 114 def _applyrequirements(self, requirements):
114 115 self.requirements = requirements
115 116 openerreqs = set(('revlogv1', 'generaldelta'))
116 117 self.sopener.options = dict((r, 1) for r in requirements
117 118 if r in openerreqs)
118 119
119 120 def _writerequirements(self):
120 121 reqfile = self.opener("requires", "w")
121 122 for r in self.requirements:
122 123 reqfile.write("%s\n" % r)
123 124 reqfile.close()
124 125
125 126 def _checknested(self, path):
126 127 """Determine if path is a legal nested repository."""
127 128 if not path.startswith(self.root):
128 129 return False
129 130 subpath = path[len(self.root) + 1:]
130 131
131 132 # XXX: Checking against the current working copy is wrong in
132 133 # the sense that it can reject things like
133 134 #
134 135 # $ hg cat -r 10 sub/x.txt
135 136 #
136 137 # if sub/ is no longer a subrepository in the working copy
137 138 # parent revision.
138 139 #
139 140 # However, it can of course also allow things that would have
140 141 # been rejected before, such as the above cat command if sub/
141 142 # is a subrepository now, but was a normal directory before.
142 143 # The old path auditor would have rejected by mistake since it
143 144 # panics when it sees sub/.hg/.
144 145 #
145 146 # All in all, checking against the working copy seems sensible
146 147 # since we want to prevent access to nested repositories on
147 148 # the filesystem *now*.
148 149 ctx = self[None]
149 150 parts = util.splitpath(subpath)
150 151 while parts:
151 152 prefix = os.sep.join(parts)
152 153 if prefix in ctx.substate:
153 154 if prefix == subpath:
154 155 return True
155 156 else:
156 157 sub = ctx.sub(prefix)
157 158 return sub.checknested(subpath[len(prefix) + 1:])
158 159 else:
159 160 parts.pop()
160 161 return False
161 162
162 163 @util.propertycache
163 164 def _bookmarks(self):
164 165 return bookmarks.read(self)
165 166
166 167 @util.propertycache
167 168 def _bookmarkcurrent(self):
168 169 return bookmarks.readcurrent(self)
169 170
170 171 @propertycache
171 172 def changelog(self):
172 173 c = changelog.changelog(self.sopener)
173 174 if 'HG_PENDING' in os.environ:
174 175 p = os.environ['HG_PENDING']
175 176 if p.startswith(self.root):
176 177 c.readpending('00changelog.i.a')
177 178 return c
178 179
179 180 @propertycache
180 181 def manifest(self):
181 182 return manifest.manifest(self.sopener)
182 183
183 184 @propertycache
184 185 def dirstate(self):
185 186 warned = [0]
186 187 def validate(node):
187 188 try:
188 189 self.changelog.rev(node)
189 190 return node
190 191 except error.LookupError:
191 192 if not warned[0]:
192 193 warned[0] = True
193 194 self.ui.warn(_("warning: ignoring unknown"
194 195 " working parent %s!\n") % short(node))
195 196 return nullid
196 197
197 198 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 199
199 200 def __getitem__(self, changeid):
200 201 if changeid is None:
201 202 return context.workingctx(self)
202 203 return context.changectx(self, changeid)
203 204
204 205 def __contains__(self, changeid):
205 206 try:
206 207 return bool(self.lookup(changeid))
207 208 except error.RepoLookupError:
208 209 return False
209 210
210 211 def __nonzero__(self):
211 212 return True
212 213
213 214 def __len__(self):
214 215 return len(self.changelog)
215 216
216 217 def __iter__(self):
217 218 for i in xrange(len(self)):
218 219 yield i
219 220
220 221 def set(self, expr, *args):
221 222 '''
222 223 Yield a context for each matching revision, after doing arg
223 224 replacement via revset.formatspec
224 225 '''
225 226
226 227 expr = revset.formatspec(expr, *args)
227 228 m = revset.match(None, expr)
228 229 for r in m(self, range(len(self))):
229 230 yield self[r]
230 231
231 232 def url(self):
232 233 return 'file:' + self.root
233 234
234 235 def hook(self, name, throw=False, **args):
235 236 return hook.hook(self.ui, self, name, throw, **args)
236 237
237 238 tag_disallowed = ':\r\n'
238 239
239 240 def _tag(self, names, node, message, local, user, date, extra={}):
240 241 if isinstance(names, str):
241 242 allchars = names
242 243 names = (names,)
243 244 else:
244 245 allchars = ''.join(names)
245 246 for c in self.tag_disallowed:
246 247 if c in allchars:
247 248 raise util.Abort(_('%r cannot be used in a tag name') % c)
248 249
249 250 branches = self.branchmap()
250 251 for name in names:
251 252 self.hook('pretag', throw=True, node=hex(node), tag=name,
252 253 local=local)
253 254 if name in branches:
254 255 self.ui.warn(_("warning: tag %s conflicts with existing"
255 256 " branch name\n") % name)
256 257
257 258 def writetags(fp, names, munge, prevtags):
258 259 fp.seek(0, 2)
259 260 if prevtags and prevtags[-1] != '\n':
260 261 fp.write('\n')
261 262 for name in names:
262 263 m = munge and munge(name) or name
263 264 if self._tagtypes and name in self._tagtypes:
264 265 old = self._tags.get(name, nullid)
265 266 fp.write('%s %s\n' % (hex(old), m))
266 267 fp.write('%s %s\n' % (hex(node), m))
267 268 fp.close()
268 269
269 270 prevtags = ''
270 271 if local:
271 272 try:
272 273 fp = self.opener('localtags', 'r+')
273 274 except IOError:
274 275 fp = self.opener('localtags', 'a')
275 276 else:
276 277 prevtags = fp.read()
277 278
278 279 # local tags are stored in the current charset
279 280 writetags(fp, names, None, prevtags)
280 281 for name in names:
281 282 self.hook('tag', node=hex(node), tag=name, local=local)
282 283 return
283 284
284 285 try:
285 286 fp = self.wfile('.hgtags', 'rb+')
286 287 except IOError, e:
287 288 if e.errno != errno.ENOENT:
288 289 raise
289 290 fp = self.wfile('.hgtags', 'ab')
290 291 else:
291 292 prevtags = fp.read()
292 293
293 294 # committed tags are stored in UTF-8
294 295 writetags(fp, names, encoding.fromlocal, prevtags)
295 296
296 297 fp.close()
297 298
298 299 if '.hgtags' not in self.dirstate:
299 300 self[None].add(['.hgtags'])
300 301
301 302 m = matchmod.exact(self.root, '', ['.hgtags'])
302 303 tagnode = self.commit(message, user, date, extra=extra, match=m)
303 304
304 305 for name in names:
305 306 self.hook('tag', node=hex(node), tag=name, local=local)
306 307
307 308 return tagnode
308 309
309 310 def tag(self, names, node, message, local, user, date):
310 311 '''tag a revision with one or more symbolic names.
311 312
312 313 names is a list of strings or, when adding a single tag, names may be a
313 314 string.
314 315
315 316 if local is True, the tags are stored in a per-repository file.
316 317 otherwise, they are stored in the .hgtags file, and a new
317 318 changeset is committed with the change.
318 319
319 320 keyword arguments:
320 321
321 322 local: whether to store tags in non-version-controlled file
322 323 (default False)
323 324
324 325 message: commit message to use if committing
325 326
326 327 user: name of user to use if committing
327 328
328 329 date: date tuple to use if committing'''
329 330
330 331 if not local:
331 332 for x in self.status()[:5]:
332 333 if '.hgtags' in x:
333 334 raise util.Abort(_('working copy of .hgtags is changed '
334 335 '(please commit .hgtags manually)'))
335 336
336 337 self.tags() # instantiate the cache
337 338 self._tag(names, node, message, local, user, date)
338 339
339 340 def tags(self):
340 341 '''return a mapping of tag to node'''
341 342 if self._tags is None:
342 343 (self._tags, self._tagtypes) = self._findtags()
343 344
344 345 return self._tags
345 346
346 347 def _findtags(self):
347 348 '''Do the hard work of finding tags. Return a pair of dicts
348 349 (tags, tagtypes) where tags maps tag name to node, and tagtypes
349 350 maps tag name to a string like \'global\' or \'local\'.
350 351 Subclasses or extensions are free to add their own tags, but
351 352 should be aware that the returned dicts will be retained for the
352 353 duration of the localrepo object.'''
353 354
354 355 # XXX what tagtype should subclasses/extensions use? Currently
355 356 # mq and bookmarks add tags, but do not set the tagtype at all.
356 357 # Should each extension invent its own tag type? Should there
357 358 # be one tagtype for all such "virtual" tags? Or is the status
358 359 # quo fine?
359 360
360 361 alltags = {} # map tag name to (node, hist)
361 362 tagtypes = {}
362 363
363 364 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
364 365 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
365 366
366 367 # Build the return dicts. Have to re-encode tag names because
367 368 # the tags module always uses UTF-8 (in order not to lose info
368 369 # writing to the cache), but the rest of Mercurial wants them in
369 370 # local encoding.
370 371 tags = {}
371 372 for (name, (node, hist)) in alltags.iteritems():
372 373 if node != nullid:
373 374 try:
374 375 # ignore tags to unknown nodes
375 376 self.changelog.lookup(node)
376 377 tags[encoding.tolocal(name)] = node
377 378 except error.LookupError:
378 379 pass
379 380 tags['tip'] = self.changelog.tip()
380 381 tagtypes = dict([(encoding.tolocal(name), value)
381 382 for (name, value) in tagtypes.iteritems()])
382 383 return (tags, tagtypes)
383 384
384 385 def tagtype(self, tagname):
385 386 '''
386 387 return the type of the given tag. result can be:
387 388
388 389 'local' : a local tag
389 390 'global' : a global tag
390 391 None : tag does not exist
391 392 '''
392 393
393 394 self.tags()
394 395
395 396 return self._tagtypes.get(tagname)
396 397
397 398 def tagslist(self):
398 399 '''return a list of tags ordered by revision'''
399 400 l = []
400 401 for t, n in self.tags().iteritems():
401 402 r = self.changelog.rev(n)
402 403 l.append((r, t, n))
403 404 return [(t, n) for r, t, n in sorted(l)]
404 405
405 406 def nodetags(self, node):
406 407 '''return the tags associated with a node'''
407 408 if not self.nodetagscache:
408 409 self.nodetagscache = {}
409 410 for t, n in self.tags().iteritems():
410 411 self.nodetagscache.setdefault(n, []).append(t)
411 412 for tags in self.nodetagscache.itervalues():
412 413 tags.sort()
413 414 return self.nodetagscache.get(node, [])
414 415
415 416 def nodebookmarks(self, node):
416 417 marks = []
417 418 for bookmark, n in self._bookmarks.iteritems():
418 419 if n == node:
419 420 marks.append(bookmark)
420 421 return sorted(marks)
421 422
422 423 def _branchtags(self, partial, lrev):
423 424 # TODO: rename this function?
424 425 tiprev = len(self) - 1
425 426 if lrev != tiprev:
426 427 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
427 428 self._updatebranchcache(partial, ctxgen)
428 429 self._writebranchcache(partial, self.changelog.tip(), tiprev)
429 430
430 431 return partial
431 432
432 433 def updatebranchcache(self):
433 434 tip = self.changelog.tip()
434 435 if self._branchcache is not None and self._branchcachetip == tip:
435 436 return self._branchcache
436 437
437 438 oldtip = self._branchcachetip
438 439 self._branchcachetip = tip
439 440 if oldtip is None or oldtip not in self.changelog.nodemap:
440 441 partial, last, lrev = self._readbranchcache()
441 442 else:
442 443 lrev = self.changelog.rev(oldtip)
443 444 partial = self._branchcache
444 445
445 446 self._branchtags(partial, lrev)
446 447 # this private cache holds all heads (not just tips)
447 448 self._branchcache = partial
448 449
449 450 def branchmap(self):
450 451 '''returns a dictionary {branch: [branchheads]}'''
451 452 self.updatebranchcache()
452 453 return self._branchcache
453 454
454 455 def branchtags(self):
455 456 '''return a dict where branch names map to the tipmost head of
456 457         the branch; open heads come before closed'''
457 458 bt = {}
458 459 for bn, heads in self.branchmap().iteritems():
459 460 tip = heads[-1]
460 461 for h in reversed(heads):
461 462 if 'close' not in self.changelog.read(h)[5]:
462 463 tip = h
463 464 break
464 465 bt[bn] = tip
465 466 return bt
466 467
467 468 def _readbranchcache(self):
468 469 partial = {}
469 470 try:
470 471 f = self.opener("cache/branchheads")
471 472 lines = f.read().split('\n')
472 473 f.close()
473 474 except (IOError, OSError):
474 475 return {}, nullid, nullrev
475 476
476 477 try:
477 478 last, lrev = lines.pop(0).split(" ", 1)
478 479 last, lrev = bin(last), int(lrev)
479 480 if lrev >= len(self) or self[lrev].node() != last:
480 481 # invalidate the cache
481 482 raise ValueError('invalidating branch cache (tip differs)')
482 483 for l in lines:
483 484 if not l:
484 485 continue
485 486 node, label = l.split(" ", 1)
486 487 label = encoding.tolocal(label.strip())
487 488 partial.setdefault(label, []).append(bin(node))
488 489 except KeyboardInterrupt:
489 490 raise
490 491 except Exception, inst:
491 492 if self.ui.debugflag:
492 493 self.ui.warn(str(inst), '\n')
493 494 partial, last, lrev = {}, nullid, nullrev
494 495 return partial, last, lrev
495 496
496 497 def _writebranchcache(self, branches, tip, tiprev):
497 498 try:
498 499 f = self.opener("cache/branchheads", "w", atomictemp=True)
499 500 f.write("%s %s\n" % (hex(tip), tiprev))
500 501 for label, nodes in branches.iteritems():
501 502 for node in nodes:
502 503 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
503 504 f.rename()
504 505 except (IOError, OSError):
505 506 pass
506 507
507 508 def _updatebranchcache(self, partial, ctxgen):
508 509 # collect new branch entries
509 510 newbranches = {}
510 511 for c in ctxgen:
511 512 newbranches.setdefault(c.branch(), []).append(c.node())
512 513 # if older branchheads are reachable from new ones, they aren't
513 514 # really branchheads. Note checking parents is insufficient:
514 515 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
515 516 for branch, newnodes in newbranches.iteritems():
516 517 bheads = partial.setdefault(branch, [])
517 518 bheads.extend(newnodes)
518 519 if len(bheads) <= 1:
519 520 continue
520 521 bheads = sorted(bheads, key=lambda x: self[x].rev())
521 522 # starting from tip means fewer passes over reachable
522 523 while newnodes:
523 524 latest = newnodes.pop()
524 525 if latest not in bheads:
525 526 continue
526 527 minbhrev = self[bheads[0]].node()
527 528 reachable = self.changelog.reachable(latest, minbhrev)
528 529 reachable.remove(latest)
529 530 if reachable:
530 531 bheads = [b for b in bheads if b not in reachable]
531 532 partial[branch] = bheads
532 533
533 534 def lookup(self, key):
534 535 if isinstance(key, int):
535 536 return self.changelog.node(key)
536 537 elif key == '.':
537 538 return self.dirstate.p1()
538 539 elif key == 'null':
539 540 return nullid
540 541 elif key == 'tip':
541 542 return self.changelog.tip()
542 543 n = self.changelog._match(key)
543 544 if n:
544 545 return n
545 546 if key in self._bookmarks:
546 547 return self._bookmarks[key]
547 548 if key in self.tags():
548 549 return self.tags()[key]
549 550 if key in self.branchtags():
550 551 return self.branchtags()[key]
551 552 n = self.changelog._partialmatch(key)
552 553 if n:
553 554 return n
554 555
555 556 # can't find key, check if it might have come from damaged dirstate
556 557 if key in self.dirstate.parents():
557 558 raise error.Abort(_("working directory has unknown parent '%s'!")
558 559 % short(key))
559 560 try:
560 561 if len(key) == 20:
561 562 key = hex(key)
562 563 except TypeError:
563 564 pass
564 565 raise error.RepoLookupError(_("unknown revision '%s'") % key)
565 566
566 567 def lookupbranch(self, key, remote=None):
567 568 repo = remote or self
568 569 if key in repo.branchmap():
569 570 return key
570 571
571 572 repo = (remote and remote.local()) and remote or self
572 573 return repo[key].branch()
573 574
574 575 def known(self, nodes):
575 576 nm = self.changelog.nodemap
576 577 return [(n in nm) for n in nodes]
577 578
578 579 def local(self):
579 580 return self
580 581
581 582 def join(self, f):
582 583 return os.path.join(self.path, f)
583 584
584 585 def wjoin(self, f):
585 586 return os.path.join(self.root, f)
586 587
587 588 def file(self, f):
588 589 if f[0] == '/':
589 590 f = f[1:]
590 591 return filelog.filelog(self.sopener, f)
591 592
592 593 def changectx(self, changeid):
593 594 return self[changeid]
594 595
595 596 def parents(self, changeid=None):
596 597 '''get list of changectxs for parents of changeid'''
597 598 return self[changeid].parents()
598 599
599 600 def filectx(self, path, changeid=None, fileid=None):
600 601 """changeid can be a changeset revision, node, or tag.
601 602 fileid can be a file revision or node."""
602 603 return context.filectx(self, path, changeid, fileid)
603 604
604 605 def getcwd(self):
605 606 return self.dirstate.getcwd()
606 607
607 608 def pathto(self, f, cwd=None):
608 609 return self.dirstate.pathto(f, cwd)
609 610
610 611 def wfile(self, f, mode='r'):
611 612 return self.wopener(f, mode)
612 613
613 614 def _link(self, f):
614 615 return os.path.islink(self.wjoin(f))
615 616
616 617 def _loadfilter(self, filter):
617 618 if filter not in self.filterpats:
618 619 l = []
619 620 for pat, cmd in self.ui.configitems(filter):
620 621 if cmd == '!':
621 622 continue
622 623 mf = matchmod.match(self.root, '', [pat])
623 624 fn = None
624 625 params = cmd
625 626 for name, filterfn in self._datafilters.iteritems():
626 627 if cmd.startswith(name):
627 628 fn = filterfn
628 629 params = cmd[len(name):].lstrip()
629 630 break
630 631 if not fn:
631 632 fn = lambda s, c, **kwargs: util.filter(s, c)
632 633 # Wrap old filters not supporting keyword arguments
633 634 if not inspect.getargspec(fn)[2]:
634 635 oldfn = fn
635 636 fn = lambda s, c, **kwargs: oldfn(s, c)
636 637 l.append((mf, fn, params))
637 638 self.filterpats[filter] = l
638 639 return self.filterpats[filter]
639 640
640 641 def _filter(self, filterpats, filename, data):
641 642 for mf, fn, cmd in filterpats:
642 643 if mf(filename):
643 644 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
644 645 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
645 646 break
646 647
647 648 return data
648 649
649 650 @propertycache
650 651 def _encodefilterpats(self):
651 652 return self._loadfilter('encode')
652 653
653 654 @propertycache
654 655 def _decodefilterpats(self):
655 656 return self._loadfilter('decode')
656 657
657 658 def adddatafilter(self, name, filter):
658 659 self._datafilters[name] = filter
659 660
660 661 def wread(self, filename):
661 662 if self._link(filename):
662 663 data = os.readlink(self.wjoin(filename))
663 664 else:
664 665 data = self.wopener.read(filename)
665 666 return self._filter(self._encodefilterpats, filename, data)
666 667
667 668 def wwrite(self, filename, data, flags):
668 669 data = self._filter(self._decodefilterpats, filename, data)
669 670 if 'l' in flags:
670 671 self.wopener.symlink(data, filename)
671 672 else:
672 673 self.wopener.write(filename, data)
673 674 if 'x' in flags:
674 675 util.setflags(self.wjoin(filename), False, True)
675 676
676 677 def wwritedata(self, filename, data):
677 678 return self._filter(self._decodefilterpats, filename, data)
678 679
679 680 def transaction(self, desc):
680 681 tr = self._transref and self._transref() or None
681 682 if tr and tr.running():
682 683 return tr.nest()
683 684
684 685 # abort here if the journal already exists
685 686 if os.path.exists(self.sjoin("journal")):
686 687 raise error.RepoError(
687 688 _("abandoned transaction found - run hg recover"))
688 689
689 690 journalfiles = self._writejournal(desc)
690 691 renames = [(x, undoname(x)) for x in journalfiles]
691 692
692 693 tr = transaction.transaction(self.ui.warn, self.sopener,
693 694 self.sjoin("journal"),
694 695 aftertrans(renames),
695 696 self.store.createmode)
696 697 self._transref = weakref.ref(tr)
697 698 return tr
698 699
699 700 def _writejournal(self, desc):
700 701 # save dirstate for rollback
701 702 try:
702 703 ds = self.opener.read("dirstate")
703 704 except IOError:
704 705 ds = ""
705 706 self.opener.write("journal.dirstate", ds)
706 707 self.opener.write("journal.branch",
707 708 encoding.fromlocal(self.dirstate.branch()))
708 709 self.opener.write("journal.desc",
709 710 "%d\n%s\n" % (len(self), desc))
710 711
711 712 bkname = self.join('bookmarks')
712 713 if os.path.exists(bkname):
713 714 util.copyfile(bkname, self.join('journal.bookmarks'))
714 715 else:
715 716 self.opener.write('journal.bookmarks', '')
716 717
717 718 return (self.sjoin('journal'), self.join('journal.dirstate'),
718 719 self.join('journal.branch'), self.join('journal.desc'),
719 720 self.join('journal.bookmarks'))
720 721
721 722 def recover(self):
722 723 lock = self.lock()
723 724 try:
724 725 if os.path.exists(self.sjoin("journal")):
725 726 self.ui.status(_("rolling back interrupted transaction\n"))
726 727 transaction.rollback(self.sopener, self.sjoin("journal"),
727 728 self.ui.warn)
728 729 self.invalidate()
729 730 return True
730 731 else:
731 732 self.ui.warn(_("no interrupted transaction available\n"))
732 733 return False
733 734 finally:
734 735 lock.release()
735 736
736 737 def rollback(self, dryrun=False):
737 738 wlock = lock = None
738 739 try:
739 740 wlock = self.wlock()
740 741 lock = self.lock()
741 742 if os.path.exists(self.sjoin("undo")):
742 743 try:
743 744 args = self.opener.read("undo.desc").splitlines()
744 745 if len(args) >= 3 and self.ui.verbose:
745 746 desc = _("repository tip rolled back to revision %s"
746 747 " (undo %s: %s)\n") % (
747 748 int(args[0]) - 1, args[1], args[2])
748 749 elif len(args) >= 2:
749 750 desc = _("repository tip rolled back to revision %s"
750 751 " (undo %s)\n") % (
751 752 int(args[0]) - 1, args[1])
752 753 except IOError:
753 754 desc = _("rolling back unknown transaction\n")
754 755 self.ui.status(desc)
755 756 if dryrun:
756 757 return
757 758 transaction.rollback(self.sopener, self.sjoin("undo"),
758 759 self.ui.warn)
759 760 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
760 761 if os.path.exists(self.join('undo.bookmarks')):
761 762 util.rename(self.join('undo.bookmarks'),
762 763 self.join('bookmarks'))
763 764 try:
764 765 branch = self.opener.read("undo.branch")
765 766 self.dirstate.setbranch(branch)
766 767 except IOError:
767 768 self.ui.warn(_("named branch could not be reset, "
768 769 "current branch is still: %s\n")
769 770 % self.dirstate.branch())
770 771 self.invalidate()
771 772 self.dirstate.invalidate()
772 773 self.destroyed()
773 774 parents = tuple([p.rev() for p in self.parents()])
774 775 if len(parents) > 1:
775 776 self.ui.status(_("working directory now based on "
776 777 "revisions %d and %d\n") % parents)
777 778 else:
778 779 self.ui.status(_("working directory now based on "
779 780 "revision %d\n") % parents)
780 781 else:
781 782 self.ui.warn(_("no rollback information available\n"))
782 783 return 1
783 784 finally:
784 785 release(lock, wlock)
785 786
786 787 def invalidatecaches(self):
787 788 self._tags = None
788 789 self._tagtypes = None
789 790 self.nodetagscache = None
790 791 self._branchcache = None # in UTF-8
791 792 self._branchcachetip = None
792 793
793 794 def invalidate(self):
794 795 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
795 796 if a in self.__dict__:
796 797 delattr(self, a)
797 798 self.invalidatecaches()
798 799
799 800 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
800 801 try:
801 802 l = lock.lock(lockname, 0, releasefn, desc=desc)
802 803 except error.LockHeld, inst:
803 804 if not wait:
804 805 raise
805 806 self.ui.warn(_("waiting for lock on %s held by %r\n") %
806 807 (desc, inst.locker))
807 808 # default to 600 seconds timeout
808 809 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
809 810 releasefn, desc=desc)
810 811 if acquirefn:
811 812 acquirefn()
812 813 return l
813 814
814 815 def lock(self, wait=True):
815 816 '''Lock the repository store (.hg/store) and return a weak reference
816 817 to the lock. Use this before modifying the store (e.g. committing or
817 818         stripping). If you are opening a transaction, get a lock as well.'''
818 819 l = self._lockref and self._lockref()
819 820 if l is not None and l.held:
820 821 l.lock()
821 822 return l
822 823
823 824 l = self._lock(self.sjoin("lock"), wait, self.store.write,
824 825 self.invalidate, _('repository %s') % self.origroot)
825 826 self._lockref = weakref.ref(l)
826 827 return l
827 828
828 829 def wlock(self, wait=True):
829 830 '''Lock the non-store parts of the repository (everything under
830 831 .hg except .hg/store) and return a weak reference to the lock.
831 832 Use this before modifying files in .hg.'''
832 833 l = self._wlockref and self._wlockref()
833 834 if l is not None and l.held:
834 835 l.lock()
835 836 return l
836 837
837 838 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
838 839 self.dirstate.invalidate, _('working directory of %s') %
839 840 self.origroot)
840 841 self._wlockref = weakref.ref(l)
841 842 return l
842 843
843 844 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
844 845 """
845 846 commit an individual file as part of a larger transaction
846 847 """
847 848
848 849 fname = fctx.path()
849 850 text = fctx.data()
850 851 flog = self.file(fname)
851 852 fparent1 = manifest1.get(fname, nullid)
852 853 fparent2 = fparent2o = manifest2.get(fname, nullid)
853 854
854 855 meta = {}
855 856 copy = fctx.renamed()
856 857 if copy and copy[0] != fname:
857 858 # Mark the new revision of this file as a copy of another
858 859 # file. This copy data will effectively act as a parent
859 860 # of this new revision. If this is a merge, the first
860 861 # parent will be the nullid (meaning "look up the copy data")
861 862 # and the second one will be the other parent. For example:
862 863 #
863 864 # 0 --- 1 --- 3 rev1 changes file foo
864 865 # \ / rev2 renames foo to bar and changes it
865 866 # \- 2 -/ rev3 should have bar with all changes and
866 867 # should record that bar descends from
867 868 # bar in rev2 and foo in rev1
868 869 #
869 870 # this allows this merge to succeed:
870 871 #
871 872 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
872 873 # \ / merging rev3 and rev4 should use bar@rev2
873 874 # \- 2 --- 4 as the merge base
874 875 #
875 876
876 877 cfname = copy[0]
877 878 crev = manifest1.get(cfname)
878 879 newfparent = fparent2
879 880
880 881 if manifest2: # branch merge
881 882 if fparent2 == nullid or crev is None: # copied on remote side
882 883 if cfname in manifest2:
883 884 crev = manifest2[cfname]
884 885 newfparent = fparent1
885 886
886 887 # find source in nearest ancestor if we've lost track
887 888 if not crev:
888 889 self.ui.debug(" %s: searching for copy revision for %s\n" %
889 890 (fname, cfname))
890 891 for ancestor in self[None].ancestors():
891 892 if cfname in ancestor:
892 893 crev = ancestor[cfname].filenode()
893 894 break
894 895
895 896 if crev:
896 897 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
897 898 meta["copy"] = cfname
898 899 meta["copyrev"] = hex(crev)
899 900 fparent1, fparent2 = nullid, newfparent
900 901 else:
901 902 self.ui.warn(_("warning: can't find ancestor for '%s' "
902 903 "copied from '%s'!\n") % (fname, cfname))
903 904
904 905 elif fparent2 != nullid:
905 906 # is one parent an ancestor of the other?
906 907 fparentancestor = flog.ancestor(fparent1, fparent2)
907 908 if fparentancestor == fparent1:
908 909 fparent1, fparent2 = fparent2, nullid
909 910 elif fparentancestor == fparent2:
910 911 fparent2 = nullid
911 912
912 913 # is the file changed?
913 914 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
914 915 changelist.append(fname)
915 916 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
916 917
917 918 # are just the flags changed during merge?
918 919 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
919 920 changelist.append(fname)
920 921
921 922 return fparent1
922 923
923 924 def commit(self, text="", user=None, date=None, match=None, force=False,
924 925 editor=False, extra={}):
925 926 """Add a new revision to current repository.
926 927
927 928 Revision information is gathered from the working directory,
928 929 match can be used to filter the committed files. If editor is
929 930 supplied, it is called to get a commit message.
930 931 """
931 932
932 933 def fail(f, msg):
933 934 raise util.Abort('%s: %s' % (f, msg))
934 935
935 936 if not match:
936 937 match = matchmod.always(self.root, '')
937 938
938 939 if not force:
939 940 vdirs = []
940 941 match.dir = vdirs.append
941 942 match.bad = fail
942 943
943 944 wlock = self.wlock()
944 945 try:
945 946 wctx = self[None]
946 947 merge = len(wctx.parents()) > 1
947 948
948 949 if (not force and merge and match and
949 950 (match.files() or match.anypats())):
950 951 raise util.Abort(_('cannot partially commit a merge '
951 952 '(do not specify files or patterns)'))
952 953
953 954 changes = self.status(match=match, clean=force)
954 955 if force:
955 956 changes[0].extend(changes[6]) # mq may commit unchanged files
956 957
957 958 # check subrepos
958 959 subs = []
959 960 removedsubs = set()
960 961 if '.hgsub' in wctx:
961 962 # only manage subrepos and .hgsubstate if .hgsub is present
962 963 for p in wctx.parents():
963 964 removedsubs.update(s for s in p.substate if match(s))
964 965 for s in wctx.substate:
965 966 removedsubs.discard(s)
966 967 if match(s) and wctx.sub(s).dirty():
967 968 subs.append(s)
968 969 if (subs or removedsubs):
969 970 if (not match('.hgsub') and
970 971 '.hgsub' in (wctx.modified() + wctx.added())):
971 972 raise util.Abort(
972 973 _("can't commit subrepos without .hgsub"))
973 974 if '.hgsubstate' not in changes[0]:
974 975 changes[0].insert(0, '.hgsubstate')
975 976 if '.hgsubstate' in changes[2]:
976 977 changes[2].remove('.hgsubstate')
977 978 elif '.hgsub' in changes[2]:
978 979 # clean up .hgsubstate when .hgsub is removed
979 980 if ('.hgsubstate' in wctx and
980 981 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
981 982 changes[2].insert(0, '.hgsubstate')
982 983
983 984 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
984 985 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
985 986 if changedsubs:
986 987 raise util.Abort(_("uncommitted changes in subrepo %s")
987 988 % changedsubs[0])
988 989
989 990 # make sure all explicit patterns are matched
990 991 if not force and match.files():
991 992 matched = set(changes[0] + changes[1] + changes[2])
992 993
993 994 for f in match.files():
994 995 if f == '.' or f in matched or f in wctx.substate:
995 996 continue
996 997 if f in changes[3]: # missing
997 998 fail(f, _('file not found!'))
998 999 if f in vdirs: # visited directory
999 1000 d = f + '/'
1000 1001 for mf in matched:
1001 1002 if mf.startswith(d):
1002 1003 break
1003 1004 else:
1004 1005 fail(f, _("no match under directory!"))
1005 1006 elif f not in self.dirstate:
1006 1007 fail(f, _("file not tracked!"))
1007 1008
1008 1009 if (not force and not extra.get("close") and not merge
1009 1010 and not (changes[0] or changes[1] or changes[2])
1010 1011 and wctx.branch() == wctx.p1().branch()):
1011 1012 return None
1012 1013
1013 1014 ms = mergemod.mergestate(self)
1014 1015 for f in changes[0]:
1015 1016 if f in ms and ms[f] == 'u':
1016 1017 raise util.Abort(_("unresolved merge conflicts "
1017 1018 "(see hg help resolve)"))
1018 1019
1019 1020 cctx = context.workingctx(self, text, user, date, extra, changes)
1020 1021 if editor:
1021 1022 cctx._text = editor(self, cctx, subs)
1022 1023 edited = (text != cctx._text)
1023 1024
1024 1025 # commit subs
1025 1026 if subs or removedsubs:
1026 1027 state = wctx.substate.copy()
1027 1028 for s in sorted(subs):
1028 1029 sub = wctx.sub(s)
1029 1030 self.ui.status(_('committing subrepository %s\n') %
1030 1031 subrepo.subrelpath(sub))
1031 1032 sr = sub.commit(cctx._text, user, date)
1032 1033 state[s] = (state[s][0], sr)
1033 1034 subrepo.writestate(self, state)
1034 1035
1035 1036 # Save commit message in case this transaction gets rolled back
1036 1037 # (e.g. by a pretxncommit hook). Leave the content alone on
1037 1038 # the assumption that the user will use the same editor again.
1038 1039 msgfn = self.savecommitmessage(cctx._text)
1039 1040
1040 1041 p1, p2 = self.dirstate.parents()
1041 1042 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1042 1043 try:
1043 1044 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1044 1045 ret = self.commitctx(cctx, True)
1045 1046 except:
1046 1047 if edited:
1047 1048 self.ui.write(
1048 1049 _('note: commit message saved in %s\n') % msgfn)
1049 1050 raise
1050 1051
1051 1052 # update bookmarks, dirstate and mergestate
1052 1053 bookmarks.update(self, p1, ret)
1053 1054 for f in changes[0] + changes[1]:
1054 1055 self.dirstate.normal(f)
1055 1056 for f in changes[2]:
1056 1057 self.dirstate.drop(f)
1057 1058 self.dirstate.setparents(ret)
1058 1059 ms.reset()
1059 1060 finally:
1060 1061 wlock.release()
1061 1062
1062 1063 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1063 1064 return ret
1064 1065
1065 1066 def commitctx(self, ctx, error=False):
1066 1067 """Add a new revision to current repository.
1067 1068 Revision information is passed via the context argument.
1068 1069 """
1069 1070
1070 1071 tr = lock = None
1071 1072 removed = list(ctx.removed())
1072 1073 p1, p2 = ctx.p1(), ctx.p2()
1073 1074 user = ctx.user()
1074 1075
1075 1076 lock = self.lock()
1076 1077 try:
1077 1078 tr = self.transaction("commit")
1078 1079 trp = weakref.proxy(tr)
1079 1080
1080 1081 if ctx.files():
1081 1082 m1 = p1.manifest().copy()
1082 1083 m2 = p2.manifest()
1083 1084
1084 1085 # check in files
1085 1086 new = {}
1086 1087 changed = []
1087 1088 linkrev = len(self)
1088 1089 for f in sorted(ctx.modified() + ctx.added()):
1089 1090 self.ui.note(f + "\n")
1090 1091 try:
1091 1092 fctx = ctx[f]
1092 1093 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1093 1094 changed)
1094 1095 m1.set(f, fctx.flags())
1095 1096 except OSError, inst:
1096 1097 self.ui.warn(_("trouble committing %s!\n") % f)
1097 1098 raise
1098 1099 except IOError, inst:
1099 1100 errcode = getattr(inst, 'errno', errno.ENOENT)
1100 1101 if error or errcode and errcode != errno.ENOENT:
1101 1102 self.ui.warn(_("trouble committing %s!\n") % f)
1102 1103 raise
1103 1104 else:
1104 1105 removed.append(f)
1105 1106
1106 1107 # update manifest
1107 1108 m1.update(new)
1108 1109 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1109 1110 drop = [f for f in removed if f in m1]
1110 1111 for f in drop:
1111 1112 del m1[f]
1112 1113 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1113 1114 p2.manifestnode(), (new, drop))
1114 1115 files = changed + removed
1115 1116 else:
1116 1117 mn = p1.manifestnode()
1117 1118 files = []
1118 1119
1119 1120 # update changelog
1120 1121 self.changelog.delayupdate()
1121 1122 n = self.changelog.add(mn, files, ctx.description(),
1122 1123 trp, p1.node(), p2.node(),
1123 1124 user, ctx.date(), ctx.extra().copy())
1124 1125 p = lambda: self.changelog.writepending() and self.root or ""
1125 1126 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1126 1127 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1127 1128 parent2=xp2, pending=p)
1128 1129 self.changelog.finalize(trp)
1129 1130 tr.close()
1130 1131
1131 1132 if self._branchcache:
1132 1133 self.updatebranchcache()
1133 1134 return n
1134 1135 finally:
1135 1136 if tr:
1136 1137 tr.release()
1137 1138 lock.release()
1138 1139
1139 1140 def destroyed(self):
1140 1141 '''Inform the repository that nodes have been destroyed.
1141 1142 Intended for use by strip and rollback, so there's a common
1142 1143 place for anything that has to be done after destroying history.'''
1143 1144 # XXX it might be nice if we could take the list of destroyed
1144 1145 # nodes, but I don't see an easy way for rollback() to do that
1145 1146
1146 1147 # Ensure the persistent tag cache is updated. Doing it now
1147 1148 # means that the tag cache only has to worry about destroyed
1148 1149 # heads immediately after a strip/rollback. That in turn
1149 1150 # guarantees that "cachetip == currenttip" (comparing both rev
1150 1151 # and node) always means no nodes have been added or destroyed.
1151 1152
1152 1153 # XXX this is suboptimal when qrefresh'ing: we strip the current
1153 1154 # head, refresh the tag cache, then immediately add a new head.
1154 1155 # But I think doing it this way is necessary for the "instant
1155 1156 # tag cache retrieval" case to work.
1156 1157 self.invalidatecaches()
1157 1158
1158 1159 def walk(self, match, node=None):
1159 1160 '''
1160 1161 walk recursively through the directory tree or a given
1161 1162 changeset, finding all files matched by the match
1162 1163 function
1163 1164 '''
1164 1165 return self[node].walk(match)
1165 1166
1166 1167 def status(self, node1='.', node2=None, match=None,
1167 1168 ignored=False, clean=False, unknown=False,
1168 1169 listsubrepos=False):
1169 1170 """return status of files between two nodes or node and working directory
1170 1171
1171 1172 If node1 is None, use the first dirstate parent instead.
1172 1173 If node2 is None, compare node1 with working directory.
1173 1174 """
1174 1175
1175 1176 def mfmatches(ctx):
1176 1177 mf = ctx.manifest().copy()
1177 1178 for fn in mf.keys():
1178 1179 if not match(fn):
1179 1180 del mf[fn]
1180 1181 return mf
1181 1182
1182 1183 if isinstance(node1, context.changectx):
1183 1184 ctx1 = node1
1184 1185 else:
1185 1186 ctx1 = self[node1]
1186 1187 if isinstance(node2, context.changectx):
1187 1188 ctx2 = node2
1188 1189 else:
1189 1190 ctx2 = self[node2]
1190 1191
1191 1192 working = ctx2.rev() is None
1192 1193 parentworking = working and ctx1 == self['.']
1193 1194 match = match or matchmod.always(self.root, self.getcwd())
1194 1195 listignored, listclean, listunknown = ignored, clean, unknown
1195 1196
1196 1197 # load earliest manifest first for caching reasons
1197 1198 if not working and ctx2.rev() < ctx1.rev():
1198 1199 ctx2.manifest()
1199 1200
1200 1201 if not parentworking:
1201 1202 def bad(f, msg):
1202 1203 if f not in ctx1:
1203 1204 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1204 1205 match.bad = bad
1205 1206
1206 1207 if working: # we need to scan the working dir
1207 1208 subrepos = []
1208 1209 if '.hgsub' in self.dirstate:
1209 1210 subrepos = ctx2.substate.keys()
1210 1211 s = self.dirstate.status(match, subrepos, listignored,
1211 1212 listclean, listunknown)
1212 1213 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1213 1214
1214 1215 # check for any possibly clean files
1215 1216 if parentworking and cmp:
1216 1217 fixup = []
1217 1218 # do a full compare of any files that might have changed
1218 1219 for f in sorted(cmp):
1219 1220 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1220 1221 or ctx1[f].cmp(ctx2[f])):
1221 1222 modified.append(f)
1222 1223 else:
1223 1224 fixup.append(f)
1224 1225
1225 1226 # update dirstate for files that are actually clean
1226 1227 if fixup:
1227 1228 if listclean:
1228 1229 clean += fixup
1229 1230
1230 1231 try:
1231 1232 # updating the dirstate is optional
1232 1233 # so we don't wait on the lock
1233 1234 wlock = self.wlock(False)
1234 1235 try:
1235 1236 for f in fixup:
1236 1237 self.dirstate.normal(f)
1237 1238 finally:
1238 1239 wlock.release()
1239 1240 except error.LockError:
1240 1241 pass
1241 1242
1242 1243 if not parentworking:
1243 1244 mf1 = mfmatches(ctx1)
1244 1245 if working:
1245 1246 # we are comparing working dir against non-parent
1246 1247 # generate a pseudo-manifest for the working dir
1247 1248 mf2 = mfmatches(self['.'])
1248 1249 for f in cmp + modified + added:
1249 1250 mf2[f] = None
1250 1251 mf2.set(f, ctx2.flags(f))
1251 1252 for f in removed:
1252 1253 if f in mf2:
1253 1254 del mf2[f]
1254 1255 else:
1255 1256 # we are comparing two revisions
1256 1257 deleted, unknown, ignored = [], [], []
1257 1258 mf2 = mfmatches(ctx2)
1258 1259
1259 1260 modified, added, clean = [], [], []
1260 1261 for fn in mf2:
1261 1262 if fn in mf1:
1262 1263 if (fn not in deleted and
1263 1264 (mf1.flags(fn) != mf2.flags(fn) or
1264 1265 (mf1[fn] != mf2[fn] and
1265 1266 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1266 1267 modified.append(fn)
1267 1268 elif listclean:
1268 1269 clean.append(fn)
1269 1270 del mf1[fn]
1270 1271 elif fn not in deleted:
1271 1272 added.append(fn)
1272 1273 removed = mf1.keys()
1273 1274
1274 1275 r = modified, added, removed, deleted, unknown, ignored, clean
1275 1276
1276 1277 if listsubrepos:
1277 1278 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1278 1279 if working:
1279 1280 rev2 = None
1280 1281 else:
1281 1282 rev2 = ctx2.substate[subpath][1]
1282 1283 try:
1283 1284 submatch = matchmod.narrowmatcher(subpath, match)
1284 1285 s = sub.status(rev2, match=submatch, ignored=listignored,
1285 1286 clean=listclean, unknown=listunknown,
1286 1287 listsubrepos=True)
1287 1288 for rfiles, sfiles in zip(r, s):
1288 1289 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1289 1290 except error.LookupError:
1290 1291 self.ui.status(_("skipping missing subrepository: %s\n")
1291 1292 % subpath)
1292 1293
1293 1294 for l in r:
1294 1295 l.sort()
1295 1296 return r
1296 1297
1297 1298 def heads(self, start=None):
1298 1299 heads = self.changelog.heads(start)
1299 1300 # sort the output in rev descending order
1300 1301 return sorted(heads, key=self.changelog.rev, reverse=True)
1301 1302
1302 1303 def branchheads(self, branch=None, start=None, closed=False):
1303 1304 '''return a (possibly filtered) list of heads for the given branch
1304 1305
1305 1306 Heads are returned in topological order, from newest to oldest.
1306 1307 If branch is None, use the dirstate branch.
1307 1308 If start is not None, return only heads reachable from start.
1308 1309 If closed is True, return heads that are marked as closed as well.
1309 1310 '''
1310 1311 if branch is None:
1311 1312 branch = self[None].branch()
1312 1313 branches = self.branchmap()
1313 1314 if branch not in branches:
1314 1315 return []
1315 1316 # the cache returns heads ordered lowest to highest
1316 1317 bheads = list(reversed(branches[branch]))
1317 1318 if start is not None:
1318 1319 # filter out the heads that cannot be reached from startrev
1319 1320 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1320 1321 bheads = [h for h in bheads if h in fbheads]
1321 1322 if not closed:
1322 1323 bheads = [h for h in bheads if
1323 1324 ('close' not in self.changelog.read(h)[5])]
1324 1325 return bheads
1325 1326
1326 1327 def branches(self, nodes):
1327 1328 if not nodes:
1328 1329 nodes = [self.changelog.tip()]
1329 1330 b = []
1330 1331 for n in nodes:
1331 1332 t = n
1332 1333 while True:
1333 1334 p = self.changelog.parents(n)
1334 1335 if p[1] != nullid or p[0] == nullid:
1335 1336 b.append((t, n, p[0], p[1]))
1336 1337 break
1337 1338 n = p[0]
1338 1339 return b
1339 1340
1340 1341 def between(self, pairs):
1341 1342 r = []
1342 1343
1343 1344 for top, bottom in pairs:
1344 1345 n, l, i = top, [], 0
1345 1346 f = 1
1346 1347
1347 1348 while n != bottom and n != nullid:
1348 1349 p = self.changelog.parents(n)[0]
1349 1350 if i == f:
1350 1351 l.append(n)
1351 1352 f = f * 2
1352 1353 n = p
1353 1354 i += 1
1354 1355
1355 1356 r.append(l)
1356 1357
1357 1358 return r
1358 1359
1359 1360 def pull(self, remote, heads=None, force=False):
1360 1361 lock = self.lock()
1361 1362 try:
1362 1363 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1363 1364 force=force)
1364 1365 common, fetch, rheads = tmp
1365 1366 if not fetch:
1366 1367 self.ui.status(_("no changes found\n"))
1367 1368 result = 0
1368 1369 else:
1369 1370 if heads is None and list(common) == [nullid]:
1370 1371 self.ui.status(_("requesting all changes\n"))
1371 1372 elif heads is None and remote.capable('changegroupsubset'):
1372 1373 # issue1320, avoid a race if remote changed after discovery
1373 1374 heads = rheads
1374 1375
1375 1376 if remote.capable('getbundle'):
1376 1377 cg = remote.getbundle('pull', common=common,
1377 1378 heads=heads or rheads)
1378 1379 elif heads is None:
1379 1380 cg = remote.changegroup(fetch, 'pull')
1380 1381 elif not remote.capable('changegroupsubset'):
1381 1382 raise util.Abort(_("partial pull cannot be done because "
1382 1383 "other repository doesn't support "
1383 1384 "changegroupsubset."))
1384 1385 else:
1385 1386 cg = remote.changegroupsubset(fetch, heads, 'pull')
1386 1387 result = self.addchangegroup(cg, 'pull', remote.url(),
1387 1388 lock=lock)
1388 1389 finally:
1389 1390 lock.release()
1390 1391
1391 1392 return result
1392 1393
1393 1394 def checkpush(self, force, revs):
1394 1395 """Extensions can override this function if additional checks have
1395 1396 to be performed before pushing, or call it if they override push
1396 1397 command.
1397 1398 """
1398 1399 pass
1399 1400
1400 1401 def push(self, remote, force=False, revs=None, newbranch=False):
1401 1402 '''Push outgoing changesets (limited by revs) from the current
1402 1403 repository to remote. Return an integer:
1403 1404 - 0 means HTTP error *or* nothing to push
1404 1405 - 1 means we pushed and remote head count is unchanged *or*
1405 1406 we have outgoing changesets but refused to push
1406 1407 - other values as described by addchangegroup()
1407 1408 '''
1408 1409 # there are two ways to push to remote repo:
1409 1410 #
1410 1411 # addchangegroup assumes local user can lock remote
1411 1412 # repo (local filesystem, old ssh servers).
1412 1413 #
1413 1414 # unbundle assumes local user cannot lock remote repo (new ssh
1414 1415 # servers, http servers).
1415 1416
1416 1417 self.checkpush(force, revs)
1417 1418 lock = None
1418 1419 unbundle = remote.capable('unbundle')
1419 1420 if not unbundle:
1420 1421 lock = remote.lock()
1421 1422 try:
1422 1423 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1423 1424 newbranch)
1424 1425 ret = remote_heads
1425 1426 if cg is not None:
1426 1427 if unbundle:
1427 1428 # local repo finds heads on server, finds out what
1428 1429 # revs it must push. once revs transferred, if server
1429 1430 # finds it has different heads (someone else won
1430 1431 # commit/push race), server aborts.
1431 1432 if force:
1432 1433 remote_heads = ['force']
1433 1434 # ssh: return remote's addchangegroup()
1434 1435 # http: return remote's addchangegroup() or 0 for error
1435 1436 ret = remote.unbundle(cg, remote_heads, 'push')
1436 1437 else:
1437 1438 # we return an integer indicating remote head count change
1438 1439 ret = remote.addchangegroup(cg, 'push', self.url(),
1439 1440 lock=lock)
1440 1441 finally:
1441 1442 if lock is not None:
1442 1443 lock.release()
1443 1444
1444 1445 self.ui.debug("checking for updated bookmarks\n")
1445 1446 rb = remote.listkeys('bookmarks')
1446 1447 for k in rb.keys():
1447 1448 if k in self._bookmarks:
1448 1449 nr, nl = rb[k], hex(self._bookmarks[k])
1449 1450 if nr in self:
1450 1451 cr = self[nr]
1451 1452 cl = self[nl]
1452 1453 if cl in cr.descendants():
1453 1454 r = remote.pushkey('bookmarks', k, nr, nl)
1454 1455 if r:
1455 1456 self.ui.status(_("updating bookmark %s\n") % k)
1456 1457 else:
1457 1458 self.ui.warn(_('updating bookmark %s'
1458 1459 ' failed!\n') % k)
1459 1460
1460 1461 return ret
1461 1462
1462 1463 def changegroupinfo(self, nodes, source):
1463 1464 if self.ui.verbose or source == 'bundle':
1464 1465 self.ui.status(_("%d changesets found\n") % len(nodes))
1465 1466 if self.ui.debugflag:
1466 1467 self.ui.debug("list of changesets:\n")
1467 1468 for node in nodes:
1468 1469 self.ui.debug("%s\n" % hex(node))
1469 1470
1470 1471 def changegroupsubset(self, bases, heads, source):
1471 1472 """Compute a changegroup consisting of all the nodes that are
1472 1473 descendants of any of the bases and ancestors of any of the heads.
1473 1474 Return a chunkbuffer object whose read() method will return
1474 1475 successive changegroup chunks.
1475 1476
1476 1477 It is fairly complex as determining which filenodes and which
1477 1478 manifest nodes need to be included for the changeset to be complete
1478 1479 is non-trivial.
1479 1480
1480 1481 Another wrinkle is doing the reverse, figuring out which changeset in
1481 1482 the changegroup a particular filenode or manifestnode belongs to.
1482 1483 """
1483 1484 cl = self.changelog
1484 1485 if not bases:
1485 1486 bases = [nullid]
1486 1487 csets, bases, heads = cl.nodesbetween(bases, heads)
1487 1488 # We assume that all ancestors of bases are known
1488 1489 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1489 1490 return self._changegroupsubset(common, csets, heads, source)
1490 1491
1491 1492 def getbundle(self, source, heads=None, common=None):
1492 1493 """Like changegroupsubset, but returns the set difference between the
1493 1494         ancestors of heads and the ancestors of common.
1494 1495
1495 1496 If heads is None, use the local heads. If common is None, use [nullid].
1496 1497
1497 1498 The nodes in common might not all be known locally due to the way the
1498 1499 current discovery protocol works.
1499 1500 """
1500 1501 cl = self.changelog
1501 1502 if common:
1502 1503 nm = cl.nodemap
1503 1504 common = [n for n in common if n in nm]
1504 1505 else:
1505 1506 common = [nullid]
1506 1507 if not heads:
1507 1508 heads = cl.heads()
1508 1509 common, missing = cl.findcommonmissing(common, heads)
1509 1510 if not missing:
1510 1511 return None
1511 1512 return self._changegroupsubset(common, missing, heads, source)
1512 1513
1513 1514 def _changegroupsubset(self, commonrevs, csets, heads, source):
1514 1515
1515 1516 cl = self.changelog
1516 1517 mf = self.manifest
1517 1518 mfs = {} # needed manifests
1518 1519 fnodes = {} # needed file nodes
1519 1520 changedfiles = set()
1520 1521 fstate = ['', {}]
1521 1522 count = [0]
1522 1523
1523 1524         # can we go through the fast path?
1524 1525 heads.sort()
1525 1526 if heads == sorted(self.heads()):
1526 1527 return self._changegroup(csets, source)
1527 1528
1528 1529 # slow path
1529 1530 self.hook('preoutgoing', throw=True, source=source)
1530 1531 self.changegroupinfo(csets, source)
1531 1532
1532 1533 # filter any nodes that claim to be part of the known set
1533 1534 def prune(revlog, missing):
1534 1535 return [n for n in missing
1535 1536 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1536 1537
1537 1538 def lookup(revlog, x):
1538 1539 if revlog == cl:
1539 1540 c = cl.read(x)
1540 1541 changedfiles.update(c[3])
1541 1542 mfs.setdefault(c[0], x)
1542 1543 count[0] += 1
1543 1544 self.ui.progress(_('bundling'), count[0],
1544 1545 unit=_('changesets'), total=len(csets))
1545 1546 return x
1546 1547 elif revlog == mf:
1547 1548 clnode = mfs[x]
1548 1549 mdata = mf.readfast(x)
1549 1550 for f in changedfiles:
1550 1551 if f in mdata:
1551 1552 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1552 1553 count[0] += 1
1553 1554 self.ui.progress(_('bundling'), count[0],
1554 1555 unit=_('manifests'), total=len(mfs))
1555 1556 return mfs[x]
1556 1557 else:
1557 1558 self.ui.progress(
1558 1559 _('bundling'), count[0], item=fstate[0],
1559 1560 unit=_('files'), total=len(changedfiles))
1560 1561 return fstate[1][x]
1561 1562
1562 1563 bundler = changegroup.bundle10(lookup)
1563 1564 reorder = self.ui.config('bundle', 'reorder', 'auto')
1564 1565 if reorder == 'auto':
1565 1566 reorder = None
1566 1567 else:
1567 1568 reorder = util.parsebool(reorder)
1568 1569
1569 1570 def gengroup():
1570 1571 # Create a changenode group generator that will call our functions
1571 1572 # back to lookup the owning changenode and collect information.
1572 1573 for chunk in cl.group(csets, bundler, reorder=reorder):
1573 1574 yield chunk
1574 1575 self.ui.progress(_('bundling'), None)
1575 1576
1576 1577 # Create a generator for the manifestnodes that calls our lookup
1577 1578 # and data collection functions back.
1578 1579 count[0] = 0
1579 1580 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1580 1581 yield chunk
1581 1582 self.ui.progress(_('bundling'), None)
1582 1583
1583 1584 mfs.clear()
1584 1585
1585 1586 # Go through all our files in order sorted by name.
1586 1587 count[0] = 0
1587 1588 for fname in sorted(changedfiles):
1588 1589 filerevlog = self.file(fname)
1589 1590 if not len(filerevlog):
1590 1591 raise util.Abort(_("empty or missing revlog for %s") % fname)
1591 1592 fstate[0] = fname
1592 1593 fstate[1] = fnodes.pop(fname, {})
1593 1594
1594 1595 nodelist = prune(filerevlog, fstate[1])
1595 1596 if nodelist:
1596 1597 count[0] += 1
1597 1598 yield bundler.fileheader(fname)
1598 1599 for chunk in filerevlog.group(nodelist, bundler, reorder):
1599 1600 yield chunk
1600 1601
1601 1602 # Signal that no more groups are left.
1602 1603 yield bundler.close()
1603 1604 self.ui.progress(_('bundling'), None)
1604 1605
1605 1606 if csets:
1606 1607 self.hook('outgoing', node=hex(csets[0]), source=source)
1607 1608
1608 1609 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1609 1610
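    # A minimal consumer sketch (hypothetical caller, not part of this
    # change). Per the docstrings here, getbundle() returns None when
    # nothing is missing, otherwise an unbundle10 object whose read()
    # yields the stream in successive chunks:
    #
    #     cg = repo.getbundle('pull', heads=heads, common=common)
    #     if cg is not None:
    #         while True:
    #             chunk = cg.read(4096)
    #             if not chunk:
    #                 break
    #             fp.write(chunk)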
1610 1611 def changegroup(self, basenodes, source):
1611 1612 # to avoid a race we use changegroupsubset() (issue1320)
1612 1613 return self.changegroupsubset(basenodes, self.heads(), source)
1613 1614
1614 1615 def _changegroup(self, nodes, source):
1615 1616 """Compute the changegroup of all nodes that we have that a recipient
1616 1617 doesn't. Return a chunkbuffer object whose read() method will return
1617 1618 successive changegroup chunks.
1618 1619
1619 1620 This is much easier than the previous function as we can assume that
1620 1621 the recipient has any changenode we aren't sending them.
1621 1622
1622 1623 nodes is the set of nodes to send"""
1623 1624
1624 1625 cl = self.changelog
1625 1626 mf = self.manifest
1626 1627 mfs = {}
1627 1628 changedfiles = set()
1628 1629 fstate = ['']
1629 1630 count = [0]
1630 1631
1631 1632 self.hook('preoutgoing', throw=True, source=source)
1632 1633 self.changegroupinfo(nodes, source)
1633 1634
1634 1635 revset = set([cl.rev(n) for n in nodes])
1635 1636
1636 1637 def gennodelst(log):
1637 1638 return [log.node(r) for r in log if log.linkrev(r) in revset]
1638 1639
1639 1640 def lookup(revlog, x):
1640 1641 if revlog == cl:
1641 1642 c = cl.read(x)
1642 1643 changedfiles.update(c[3])
1643 1644 mfs.setdefault(c[0], x)
1644 1645 count[0] += 1
1645 1646 self.ui.progress(_('bundling'), count[0],
1646 1647 unit=_('changesets'), total=len(nodes))
1647 1648 return x
1648 1649 elif revlog == mf:
1649 1650 count[0] += 1
1650 1651 self.ui.progress(_('bundling'), count[0],
1651 1652 unit=_('manifests'), total=len(mfs))
1652 1653 return cl.node(revlog.linkrev(revlog.rev(x)))
1653 1654 else:
1654 1655 self.ui.progress(
1655 1656 _('bundling'), count[0], item=fstate[0],
1656 1657 total=len(changedfiles), unit=_('files'))
1657 1658 return cl.node(revlog.linkrev(revlog.rev(x)))
1658 1659
1659 1660 bundler = changegroup.bundle10(lookup)
1660 1661 reorder = self.ui.config('bundle', 'reorder', 'auto')
1661 1662 if reorder == 'auto':
1662 1663 reorder = None
1663 1664 else:
1664 1665 reorder = util.parsebool(reorder)
1665 1666
1666 1667 def gengroup():
1667 1668 '''yield a sequence of changegroup chunks (strings)'''
1668 1669 # construct a list of all changed files
1669 1670
1670 1671 for chunk in cl.group(nodes, bundler, reorder=reorder):
1671 1672 yield chunk
1672 1673 self.ui.progress(_('bundling'), None)
1673 1674
1674 1675 count[0] = 0
1675 1676 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1676 1677 yield chunk
1677 1678 self.ui.progress(_('bundling'), None)
1678 1679
1679 1680 count[0] = 0
1680 1681 for fname in sorted(changedfiles):
1681 1682 filerevlog = self.file(fname)
1682 1683 if not len(filerevlog):
1683 1684 raise util.Abort(_("empty or missing revlog for %s") % fname)
1684 1685 fstate[0] = fname
1685 1686 nodelist = gennodelst(filerevlog)
1686 1687 if nodelist:
1687 1688 count[0] += 1
1688 1689 yield bundler.fileheader(fname)
1689 1690 for chunk in filerevlog.group(nodelist, bundler, reorder):
1690 1691 yield chunk
1691 1692 yield bundler.close()
1692 1693 self.ui.progress(_('bundling'), None)
1693 1694
1694 1695 if nodes:
1695 1696 self.hook('outgoing', node=hex(nodes[0]), source=source)
1696 1697
1697 1698 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1698 1699
1699 1700 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1700 1701 """Add the changegroup returned by source.read() to this repo.
1701 1702 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1702 1703 the URL of the repo where this changegroup is coming from.
1703 1704 If lock is not None, the function takes ownership of the lock
1704 1705 and releases it after the changegroup is added.
1705 1706
1706 1707 Return an integer summarizing the change to this repo:
1707 1708 - nothing changed or no source: 0
1708 1709 - more heads than before: 1+added heads (2..n)
1709 1710 - fewer heads than before: -1-removed heads (-2..-n)
1710 1711 - number of heads stays the same: 1
1711 1712 """
1712 1713 def csmap(x):
1713 1714 self.ui.debug("add changeset %s\n" % short(x))
1714 1715 return len(cl)
1715 1716
1716 1717 def revmap(x):
1717 1718 return cl.rev(x)
1718 1719
1719 1720 if not source:
1720 1721 return 0
1721 1722
1722 1723 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1723 1724
1724 1725 changesets = files = revisions = 0
1725 1726 efiles = set()
1726 1727
1727 1728 # write changelog data to temp files so concurrent readers will not see
1728 1729 # inconsistent view
1729 1730 cl = self.changelog
1730 1731 cl.delayupdate()
1731 1732 oldheads = cl.heads()
1732 1733
1733 1734 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1734 1735 try:
1735 1736 trp = weakref.proxy(tr)
1736 1737 # pull off the changeset group
1737 1738 self.ui.status(_("adding changesets\n"))
1738 1739 clstart = len(cl)
1739 1740 class prog(object):
1740 1741 step = _('changesets')
1741 1742 count = 1
1742 1743 ui = self.ui
1743 1744 total = None
1744 1745 def __call__(self):
1745 1746 self.ui.progress(self.step, self.count, unit=_('chunks'),
1746 1747 total=self.total)
1747 1748 self.count += 1
1748 1749 pr = prog()
1749 1750 source.callback = pr
1750 1751
1751 1752 source.changelogheader()
1752 1753 if (cl.addgroup(source, csmap, trp) is None
1753 1754 and not emptyok):
1754 1755 raise util.Abort(_("received changelog group is empty"))
1755 1756 clend = len(cl)
1756 1757 changesets = clend - clstart
1757 1758 for c in xrange(clstart, clend):
1758 1759 efiles.update(self[c].files())
1759 1760 efiles = len(efiles)
1760 1761 self.ui.progress(_('changesets'), None)
1761 1762
1762 1763 # pull off the manifest group
1763 1764 self.ui.status(_("adding manifests\n"))
1764 1765 pr.step = _('manifests')
1765 1766 pr.count = 1
1766 1767 pr.total = changesets # manifests <= changesets
1767 1768 # no need to check for empty manifest group here:
1768 1769 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1769 1770 # no new manifest will be created and the manifest group will
1770 1771 # be empty during the pull
1771 1772 source.manifestheader()
1772 1773 self.manifest.addgroup(source, revmap, trp)
1773 1774 self.ui.progress(_('manifests'), None)
1774 1775
1775 1776 needfiles = {}
1776 1777 if self.ui.configbool('server', 'validate', default=False):
1777 1778 # validate incoming csets have their manifests
1778 1779 for cset in xrange(clstart, clend):
1779 1780 mfest = self.changelog.read(self.changelog.node(cset))[0]
1780 1781 mfest = self.manifest.readdelta(mfest)
1781 1782 # store file nodes we must see
1782 1783 for f, n in mfest.iteritems():
1783 1784 needfiles.setdefault(f, set()).add(n)
1784 1785
1785 1786 # process the files
1786 1787 self.ui.status(_("adding file changes\n"))
1787 1788 pr.step = _('files')
1788 1789 pr.count = 1
1789 1790 pr.total = efiles
1790 1791 source.callback = None
1791 1792
1792 1793 while True:
1793 1794 chunkdata = source.filelogheader()
1794 1795 if not chunkdata:
1795 1796 break
1796 1797 f = chunkdata["filename"]
1797 1798 self.ui.debug("adding %s revisions\n" % f)
1798 1799 pr()
1799 1800 fl = self.file(f)
1800 1801 o = len(fl)
1801 1802 if fl.addgroup(source, revmap, trp) is None:
1802 1803 raise util.Abort(_("received file revlog group is empty"))
1803 1804 revisions += len(fl) - o
1804 1805 files += 1
1805 1806 if f in needfiles:
1806 1807 needs = needfiles[f]
1807 1808 for new in xrange(o, len(fl)):
1808 1809 n = fl.node(new)
1809 1810 if n in needs:
1810 1811 needs.remove(n)
1811 1812 if not needs:
1812 1813 del needfiles[f]
1813 1814 self.ui.progress(_('files'), None)
1814 1815
1815 1816 for f, needs in needfiles.iteritems():
1816 1817 fl = self.file(f)
1817 1818 for n in needs:
1818 1819 try:
1819 1820 fl.rev(n)
1820 1821 except error.LookupError:
1821 1822 raise util.Abort(
1822 1823 _('missing file data for %s:%s - run hg verify') %
1823 1824 (f, hex(n)))
1824 1825
1825 1826 dh = 0
1826 1827 if oldheads:
1827 1828 heads = cl.heads()
1828 1829 dh = len(heads) - len(oldheads)
1829 1830 for h in heads:
1830 1831 if h not in oldheads and 'close' in self[h].extra():
1831 1832 dh -= 1
1832 1833 htext = ""
1833 1834 if dh:
1834 1835 htext = _(" (%+d heads)") % dh
1835 1836
1836 1837 self.ui.status(_("added %d changesets"
1837 1838 " with %d changes to %d files%s\n")
1838 1839 % (changesets, revisions, files, htext))
1839 1840
1840 1841 if changesets > 0:
1841 1842 p = lambda: cl.writepending() and self.root or ""
1842 1843 self.hook('pretxnchangegroup', throw=True,
1843 1844 node=hex(cl.node(clstart)), source=srctype,
1844 1845 url=url, pending=p)
1845 1846
1846 1847 # make changelog see real files again
1847 1848 cl.finalize(trp)
1848 1849
1849 1850 tr.close()
1850 1851 finally:
1851 1852 tr.release()
1852 1853 if lock:
1853 1854 lock.release()
1854 1855
1855 1856 if changesets > 0:
1856 1857 # forcefully update the on-disk branch cache
1857 1858 self.ui.debug("updating the branch cache\n")
1858 1859 self.updatebranchcache()
1859 1860 self.hook("changegroup", node=hex(cl.node(clstart)),
1860 1861 source=srctype, url=url)
1861 1862
1862 1863 for i in xrange(clstart, clend):
1863 1864 self.hook("incoming", node=hex(cl.node(i)),
1864 1865 source=srctype, url=url)
1865 1866
1866 1867 # never return 0 here:
1867 1868 if dh < 0:
1868 1869 return dh - 1
1869 1870 else:
1870 1871 return dh + 1
1871 1872
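    # Interpreting the return value documented above, as a hedged sketch
    # (hypothetical caller):
    #
    #     ret = repo.addchangegroup(source, 'pull', url)
    #     if ret == 0:
    #         pass                    # nothing changed, or no source
    #     elif ret > 1:
    #         added = ret - 1         # new heads appeared
    #     elif ret < 0:
    #         removed = -ret - 1      # heads were merged away or closed
    #     # ret == 1: changesets added, head count unchanged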
1872 1873 def stream_in(self, remote, requirements):
1873 1874 lock = self.lock()
1874 1875 try:
1875 1876 fp = remote.stream_out()
1876 1877 l = fp.readline()
1877 1878 try:
1878 1879 resp = int(l)
1879 1880 except ValueError:
1880 1881 raise error.ResponseError(
1881 1882 _('Unexpected response from remote server:'), l)
1882 1883 if resp == 1:
1883 1884 raise util.Abort(_('operation forbidden by server'))
1884 1885 elif resp == 2:
1885 1886 raise util.Abort(_('locking the remote repository failed'))
1886 1887 elif resp != 0:
1887 1888 raise util.Abort(_('the server sent an unknown error code'))
1888 1889 self.ui.status(_('streaming all changes\n'))
1889 1890 l = fp.readline()
1890 1891 try:
1891 1892 total_files, total_bytes = map(int, l.split(' ', 1))
1892 1893 except (ValueError, TypeError):
1893 1894 raise error.ResponseError(
1894 1895 _('Unexpected response from remote server:'), l)
1895 1896 self.ui.status(_('%d files to transfer, %s of data\n') %
1896 1897 (total_files, util.bytecount(total_bytes)))
1897 1898 start = time.time()
1898 1899 for i in xrange(total_files):
1899 1900 # XXX doesn't support '\n' or '\r' in filenames
1900 1901 l = fp.readline()
1901 1902 try:
1902 1903 name, size = l.split('\0', 1)
1903 1904 size = int(size)
1904 1905 except (ValueError, TypeError):
1905 1906 raise error.ResponseError(
1906 1907 _('Unexpected response from remote server:'), l)
1907 1908 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1908 1909 # for backwards compat, name was partially encoded
1909 1910 ofp = self.sopener(store.decodedir(name), 'w')
1910 1911 for chunk in util.filechunkiter(fp, limit=size):
1911 1912 ofp.write(chunk)
1912 1913 ofp.close()
1913 1914 elapsed = time.time() - start
1914 1915 if elapsed <= 0:
1915 1916 elapsed = 0.001
1916 1917 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1917 1918 (util.bytecount(total_bytes), elapsed,
1918 1919 util.bytecount(total_bytes / elapsed)))
1919 1920
1920 1921 # new requirements = old non-format requirements + new format-related
1921 1922 # requirements from the streamed-in repository
1922 1923 requirements.update(set(self.requirements) - self.supportedformats)
1923 1924 self._applyrequirements(requirements)
1924 1925 self._writerequirements()
1925 1926
1926 1927 self.invalidate()
1927 1928 return len(self.heads()) + 1
1928 1929 finally:
1929 1930 lock.release()
1930 1931
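    # The wire framing parsed above, as implied by the reads in stream_in
    # (a sketch, not a protocol spec):
    #
    #     <status>\n                        # 0 ok, 1 forbidden, 2 lock failed
    #     <total files> <total bytes>\n
    #     then, repeated per file:
    #       <store path>\0<size>\n          # names may not contain \n or \r
    #       <size raw bytes of revlog data>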
1931 1932 def clone(self, remote, heads=[], stream=False):
1932 1933 '''clone remote repository.
1933 1934
1934 1935 keyword arguments:
1935 1936 heads: list of revs to clone (forces use of pull)
1936 1937 stream: use streaming clone if possible'''
1937 1938
1938 1939 # now, all clients that can request uncompressed clones can
1939 1940 # read repo formats supported by all servers that can serve
1940 1941 # them.
1941 1942
1942 1943 # if revlog format changes, client will have to check version
1943 1944 # and format flags on "stream" capability, and use
1944 1945 # uncompressed only if compatible.
1945 1946
1946 1947 if stream and not heads:
1947 1948 # 'stream' means remote revlog format is revlogv1 only
1948 1949 if remote.capable('stream'):
1949 1950 return self.stream_in(remote, set(('revlogv1',)))
1950 1951 # otherwise, 'streamreqs' contains the remote revlog format
1951 1952 streamreqs = remote.capable('streamreqs')
1952 1953 if streamreqs:
1953 1954 streamreqs = set(streamreqs.split(','))
1954 1955 # if we support it, stream in and adjust our requirements
1955 1956 if not streamreqs - self.supportedformats:
1956 1957 return self.stream_in(remote, streamreqs)
1957 1958 return self.pull(remote, heads)
1958 1959
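    # Capability negotiation for the streaming path above, sketched: a bare
    # 'stream' capability implies a plain revlogv1 remote, while 'streamreqs'
    # advertises the remote's format requirements as a comma-separated list:
    #
    #     streamreqs = remote.capable('streamreqs')  # e.g. "revlogv1,generaldelta"
    #     if streamreqs and not set(streamreqs.split(',')) - self.supportedformats:
    #         return self.stream_in(remote, set(streamreqs.split(',')))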
1959 1960 def pushkey(self, namespace, key, old, new):
1960 1961 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1961 1962 old=old, new=new)
1962 1963 ret = pushkey.push(self, namespace, key, old, new)
1963 1964 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1964 1965 ret=ret)
1965 1966 return ret
1966 1967
1967 1968 def listkeys(self, namespace):
1968 1969 self.hook('prelistkeys', throw=True, namespace=namespace)
1969 1970 values = pushkey.list(self, namespace)
1970 1971 self.hook('listkeys', namespace=namespace, values=values)
1971 1972 return values
1972 1973
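    # A small usage sketch for the two pushkey entry points above, assuming
    # the 'bookmarks' namespace (one real consumer; the key names here are
    # illustrative):
    #
    #     marks = repo.listkeys('bookmarks')               # {name: hex node}
    #     old = marks.get('stable', '')
    #     repo.pushkey('bookmarks', 'stable', old, newhex) # returns success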
1973 1974 def debugwireargs(self, one, two, three=None, four=None, five=None):
1974 1975 '''used to test argument passing over the wire'''
1975 1976 return "%s %s %s %s %s" % (one, two, three, four, five)
1976 1977
1977 1978 def savecommitmessage(self, text):
1978 1979 fp = self.opener('last-message.txt', 'wb')
1979 1980 try:
1980 1981 fp.write(text)
1981 1982 finally:
1982 1983 fp.close()
1983 1984 return self.pathto(fp.name[len(self.root)+1:])
1984 1985
1985 1986 # used to avoid circular references so destructors work
1986 1987 def aftertrans(files):
1987 1988 renamefiles = [tuple(t) for t in files]
1988 1989 def a():
1989 1990 for src, dest in renamefiles:
1990 1991 util.rename(src, dest)
1991 1992 return a
1992 1993
1993 1994 def undoname(fn):
1994 1995 base, name = os.path.split(fn)
1995 1996 assert name.startswith('journal')
1996 1997 return os.path.join(base, name.replace('journal', 'undo', 1))
1997 1998
1998 1999 def instance(ui, path, create):
1999 2000 return localrepository(ui, util.urllocalpath(path), create)
2000 2001
2001 2002 def islocal(path):
2002 2003 return True
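The test expectations that follow change only in the order of the
.hg/requires entries: with the requirements collected into a set, the file
is written in set-iteration order rather than append order, so the file has
to be treated as an unordered collection of names. A minimal
order-insensitive reader, sketched under that assumption (a hypothetical
standalone helper):

    def readrequirements(path):
        # Parse a .hg/requires file into a set: one requirement name per
        # line; membership is what matters, order is not significant.
        fp = open(path)
        try:
            return set(line.strip() for line in fp if line.strip())
        finally:
            fp.close()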
@@ -1,194 +1,194 b''
1 1 This test tries to exercise the ssh functionality with a dummy script
2 2
3 3 $ checknewrepo()
4 4 > {
5 5 > name=$1
6 6 > if [ -d "$name"/.hg/store ]; then
7 7 > echo store created
8 8 > fi
9 9 > if [ -f "$name"/.hg/00changelog.i ]; then
10 10 > echo 00changelog.i created
11 11 > fi
12 12 > cat "$name"/.hg/requires
13 13 > }
14 14
15 15 creating 'local'
16 16
17 17 $ hg init local
18 18 $ checknewrepo local
19 19 store created
20 20 00changelog.i created
21 21 revlogv1
22 fncache
22 23 store
23 fncache
24 24 dotencode
25 25 $ echo this > local/foo
26 26 $ hg ci --cwd local -A -m "init"
27 27 adding foo
28 28
29 29 creating repo with format.usestore=false
30 30
31 31 $ hg --config format.usestore=false init old
32 32 $ checknewrepo old
33 33 revlogv1
34 34
35 35 creating repo with format.usefncache=false
36 36
37 37 $ hg --config format.usefncache=false init old2
38 38 $ checknewrepo old2
39 39 store created
40 40 00changelog.i created
41 41 revlogv1
42 42 store
43 43
44 44 creating repo with format.dotencode=false
45 45
46 46 $ hg --config format.dotencode=false init old3
47 47 $ checknewrepo old3
48 48 store created
49 49 00changelog.i created
50 50 revlogv1
51 fncache
51 52 store
52 fncache
53 53
54 54 test failure
55 55
56 56 $ hg init local
57 57 abort: repository local already exists!
58 58 [255]
59 59
60 60 init+push to remote2
61 61
62 62 $ hg init -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote2
63 63 $ hg incoming -R remote2 local
64 64 comparing with local
65 65 changeset: 0:08b9e9f63b32
66 66 tag: tip
67 67 user: test
68 68 date: Thu Jan 01 00:00:00 1970 +0000
69 69 summary: init
70 70
71 71
72 72 $ hg push -R local -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote2
73 73 pushing to ssh://user@dummy/remote2
74 74 searching for changes
75 75 remote: adding changesets
76 76 remote: adding manifests
77 77 remote: adding file changes
78 78 remote: added 1 changesets with 1 changes to 1 files
79 79
80 80 clone to remote1
81 81
82 82 $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote1
83 83 searching for changes
84 84 remote: adding changesets
85 85 remote: adding manifests
86 86 remote: adding file changes
87 87 remote: added 1 changesets with 1 changes to 1 files
88 88
89 89 init to existing repo
90 90
91 91 $ hg init -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote1
92 92 abort: repository remote1 already exists!
93 93 abort: could not create remote repo!
94 94 [255]
95 95
96 96 clone to existing repo
97 97
98 98 $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote1
99 99 abort: repository remote1 already exists!
100 100 abort: could not create remote repo!
101 101 [255]
102 102
103 103 output of dummyssh
104 104
105 105 $ cat dummylog
106 106 Got arguments 1:user@dummy 2:hg init remote2
107 107 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
108 108 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
109 109 Got arguments 1:user@dummy 2:hg init remote1
110 110 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio
111 111 Got arguments 1:user@dummy 2:hg init remote1
112 112 Got arguments 1:user@dummy 2:hg init remote1
113 113
114 114 comparing repositories
115 115
116 116 $ hg tip -q -R local
117 117 0:08b9e9f63b32
118 118 $ hg tip -q -R remote1
119 119 0:08b9e9f63b32
120 120 $ hg tip -q -R remote2
121 121 0:08b9e9f63b32
122 122
123 123 check names for repositories (clashes with URL schemes, special chars)
124 124
125 125 $ for i in bundle file hg http https old-http ssh static-http " " "with space"; do
126 126 > printf "hg init \"$i\"... "
127 127 > hg init "$i"
128 128 > test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
129 129 > done
130 130 hg init "bundle"... ok
131 131 hg init "file"... ok
132 132 hg init "hg"... ok
133 133 hg init "http"... ok
134 134 hg init "https"... ok
135 135 hg init "old-http"... ok
136 136 hg init "ssh"... ok
137 137 hg init "static-http"... ok
138 138 hg init " "... ok
139 139 hg init "with space"... ok
140 140
141 141 creating 'local/sub/repo'
142 142
143 143 $ hg init local/sub/repo
144 144 $ checknewrepo local/sub/repo
145 145 store created
146 146 00changelog.i created
147 147 revlogv1
148 fncache
148 149 store
149 fncache
150 150 dotencode
151 151
152 152 prepare test of init of url configured from paths
153 153
154 154 $ echo '[paths]' >> $HGRCPATH
155 155 $ echo "somewhere = `pwd`/url from paths" >> $HGRCPATH
156 156 $ echo "elsewhere = `pwd`/another paths url" >> $HGRCPATH
157 157
158 158 init should (for consistency with clone) expand the url
159 159
160 160 $ hg init somewhere
161 161 $ checknewrepo "url from paths"
162 162 store created
163 163 00changelog.i created
164 164 revlogv1
165 fncache
165 166 store
166 fncache
167 167 dotencode
168 168
169 169 verify that clone also expands urls
170 170
171 171 $ hg clone somewhere elsewhere
172 172 updating to branch default
173 173 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 174 $ checknewrepo "another paths url"
175 175 store created
176 176 00changelog.i created
177 177 revlogv1
178 fncache
178 179 store
179 fncache
180 180 dotencode
181 181
182 182 clone bookmarks
183 183
184 184 $ hg -R local bookmark test
185 185 $ hg -R local bookmarks
186 186 * test 0:08b9e9f63b32
187 187 $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote-bookmarks
188 188 searching for changes
189 189 remote: adding changesets
190 190 remote: adding manifests
191 191 remote: adding file changes
192 192 remote: added 1 changesets with 1 changes to 1 files
193 193 $ hg -R remote-bookmarks bookmarks
194 194 test 0:08b9e9f63b32
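The bookmark propagation checked just above rides on the pushkey protocol
from the module diff; a hedged Python sketch of the same check from the API
side (hypothetical helper, the shell test remains authoritative):

    from mercurial import hg, ui as uimod

    def bookmarknames(path):
        # List bookmark names through the 'bookmarks' listkeys namespace.
        repo = hg.repository(uimod.ui(), path)
        return sorted(repo.listkeys('bookmarks'))

Calling bookmarknames('remote-bookmarks') should list 'test' after the
clone above.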