localrepo: don't add deleted files to list of modified/added files (issue2761)...
Idan Kamara
r13929:cff56a0e default
@@ -0,0 +1,23 @@
Test issue2761

  $ hg init

  $ touch to-be-deleted
  $ hg add
  adding to-be-deleted
  $ hg ci -m first
  $ echo a > to-be-deleted
  $ hg ci -m second
  $ rm to-be-deleted
  $ hg diff -r 0

Same issue, different code path

  $ hg up -C
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ touch doesnt-exist-in-1
  $ hg add
  adding doesnt-exist-in-1
  $ hg ci -m third
  $ rm doesnt-exist-in-1
  $ hg diff -r 1
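
Both halves of the test hinge on the same invariant: a tracked file that has merely been deleted from disk (rm, not hg rm) must show up only in status()'s deleted list, so hg diff prints nothing for it. The localrepo.py hunk that follows enforces this with two new "fn not in deleted" guards in the manifest comparison. Below is a minimal standalone sketch of that classification logic; it uses plain dicts in place of Mercurial's manifest objects and drops the flag/content comparisons, so the names and simplifications are illustrative rather than taken from the commit.

def classify(mf1, mf2, deleted, listclean=False):
    # mf1, mf2: {filename: nodeid} for the two sides being compared;
    # deleted: tracked files that are missing from the working directory.
    mf1 = dict(mf1)  # copy; entries are consumed as they are matched
    modified, added, clean = [], [], []
    for fn in mf2:
        if fn in mf1:
            if fn not in deleted and mf1[fn] != mf2[fn]:
                modified.append(fn)
            elif listclean:
                clean.append(fn)
            del mf1[fn]
        elif fn not in deleted:
            added.append(fn)
    removed = sorted(mf1)  # files that matched nothing in mf2
    return modified, added, removed, list(deleted), clean

print(classify({'a': '1'}, {'a': '2', 'b': '3'}, deleted=['a', 'b']))
# -> ([], [], [], ['a', 'b'], []): neither modified nor added, as
# issue2761 requires. Without the two "fn not in deleted" guards this
# would be (['a'], ['b'], [], ['a', 'b'], []).
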
@@ -1,1934 +1,1935 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'parentdelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=0):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = util.path_auditor(self.root, self._checknested)
35 35 self.opener = util.opener(self.path)
36 36 self.wopener = util.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener("00changelog.i", "a").write(
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'parentdelta', False):
65 65 requirements.append("parentdelta")
66 66 else:
67 67 raise error.RepoError(_("repository %s not found") % path)
68 68 elif create:
69 69 raise error.RepoError(_("repository %s already exists") % path)
70 70 else:
71 71 # find requirements
72 72 requirements = set()
73 73 try:
74 74 requirements = set(self.opener("requires").read().splitlines())
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 for r in requirements - self.supported:
79 79 raise error.RequirementError(
80 80 _("requirement '%s' not supported") % r)
81 81
82 82 self.sharedpath = self.path
83 83 try:
84 84 s = os.path.realpath(self.opener("sharedpath").read())
85 85 if not os.path.exists(s):
86 86 raise error.RepoError(
87 87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 88 self.sharedpath = s
89 89 except IOError, inst:
90 90 if inst.errno != errno.ENOENT:
91 91 raise
92 92
93 93 self.store = store.store(requirements, self.sharedpath, util.opener)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97 self.opener.createmode = self.store.createmode
98 98 self._applyrequirements(requirements)
99 99 if create:
100 100 self._writerequirements()
101 101
102 102 # These two define the set of tags for this repository. _tags
103 103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 104 # 'local'. (Global tags are defined by .hgtags across all
105 105 # heads, and local tags are defined in .hg/localtags.) They
106 106 # constitute the in-memory cache of tags.
107 107 self._tags = None
108 108 self._tagtypes = None
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.nodetagscache = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 self.sopener.options = {}
120 120 if 'parentdelta' in requirements:
121 121 self.sopener.options['parentdelta'] = 1
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
129 129 def _checknested(self, path):
130 130 """Determine if path is a legal nested repository."""
131 131 if not path.startswith(self.root):
132 132 return False
133 133 subpath = path[len(self.root) + 1:]
134 134
135 135 # XXX: Checking against the current working copy is wrong in
136 136 # the sense that it can reject things like
137 137 #
138 138 # $ hg cat -r 10 sub/x.txt
139 139 #
140 140 # if sub/ is no longer a subrepository in the working copy
141 141 # parent revision.
142 142 #
143 143 # However, it can of course also allow things that would have
144 144 # been rejected before, such as the above cat command if sub/
145 145 # is a subrepository now, but was a normal directory before.
146 146 # The old path auditor would have rejected by mistake since it
147 147 # panics when it sees sub/.hg/.
148 148 #
149 149 # All in all, checking against the working copy seems sensible
150 150 # since we want to prevent access to nested repositories on
151 151 # the filesystem *now*.
152 152 ctx = self[None]
153 153 parts = util.splitpath(subpath)
154 154 while parts:
155 155 prefix = os.sep.join(parts)
156 156 if prefix in ctx.substate:
157 157 if prefix == subpath:
158 158 return True
159 159 else:
160 160 sub = ctx.sub(prefix)
161 161 return sub.checknested(subpath[len(prefix) + 1:])
162 162 else:
163 163 parts.pop()
164 164 return False
165 165
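# Illustrative sketch, not part of this change: how the check above
# behaves for a repository at /repo whose working copy records a
# subrepository "sub" (all paths hypothetical):
#
#     repo._checknested('/repo/sub')        # True: exact subrepo path
#     repo._checknested('/repo/sub/file')   # delegates: sub.checknested('file')
#     repo._checknested('/repo/other/dir')  # False: no subrepo prefix
#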
166 166 @util.propertycache
167 167 def _bookmarks(self):
168 168 return bookmarks.read(self)
169 169
170 170 @util.propertycache
171 171 def _bookmarkcurrent(self):
172 172 return bookmarks.readcurrent(self)
173 173
174 174 @propertycache
175 175 def changelog(self):
176 176 c = changelog.changelog(self.sopener)
177 177 if 'HG_PENDING' in os.environ:
178 178 p = os.environ['HG_PENDING']
179 179 if p.startswith(self.root):
180 180 c.readpending('00changelog.i.a')
181 181 self.sopener.options['defversion'] = c.version
182 182 return c
183 183
184 184 @propertycache
185 185 def manifest(self):
186 186 return manifest.manifest(self.sopener)
187 187
188 188 @propertycache
189 189 def dirstate(self):
190 190 warned = [0]
191 191 def validate(node):
192 192 try:
193 193 r = self.changelog.rev(node)
194 194 return node
195 195 except error.LookupError:
196 196 if not warned[0]:
197 197 warned[0] = True
198 198 self.ui.warn(_("warning: ignoring unknown"
199 199 " working parent %s!\n") % short(node))
200 200 return nullid
201 201
202 202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203 203
204 204 def __getitem__(self, changeid):
205 205 if changeid is None:
206 206 return context.workingctx(self)
207 207 return context.changectx(self, changeid)
208 208
209 209 def __contains__(self, changeid):
210 210 try:
211 211 return bool(self.lookup(changeid))
212 212 except error.RepoLookupError:
213 213 return False
214 214
215 215 def __nonzero__(self):
216 216 return True
217 217
218 218 def __len__(self):
219 219 return len(self.changelog)
220 220
221 221 def __iter__(self):
222 222 for i in xrange(len(self)):
223 223 yield i
224 224
225 225 def url(self):
226 226 return 'file:' + self.root
227 227
228 228 def hook(self, name, throw=False, **args):
229 229 return hook.hook(self.ui, self, name, throw, **args)
230 230
231 231 tag_disallowed = ':\r\n'
232 232
233 233 def _tag(self, names, node, message, local, user, date, extra={}):
234 234 if isinstance(names, str):
235 235 allchars = names
236 236 names = (names,)
237 237 else:
238 238 allchars = ''.join(names)
239 239 for c in self.tag_disallowed:
240 240 if c in allchars:
241 241 raise util.Abort(_('%r cannot be used in a tag name') % c)
242 242
243 243 branches = self.branchmap()
244 244 for name in names:
245 245 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 246 local=local)
247 247 if name in branches:
248 248 self.ui.warn(_("warning: tag %s conflicts with existing"
249 249 " branch name\n") % name)
250 250
251 251 def writetags(fp, names, munge, prevtags):
252 252 fp.seek(0, 2)
253 253 if prevtags and prevtags[-1] != '\n':
254 254 fp.write('\n')
255 255 for name in names:
256 256 m = munge and munge(name) or name
257 257 if self._tagtypes and name in self._tagtypes:
258 258 old = self._tags.get(name, nullid)
259 259 fp.write('%s %s\n' % (hex(old), m))
260 260 fp.write('%s %s\n' % (hex(node), m))
261 261 fp.close()
262 262
263 263 prevtags = ''
264 264 if local:
265 265 try:
266 266 fp = self.opener('localtags', 'r+')
267 267 except IOError:
268 268 fp = self.opener('localtags', 'a')
269 269 else:
270 270 prevtags = fp.read()
271 271
272 272 # local tags are stored in the current charset
273 273 writetags(fp, names, None, prevtags)
274 274 for name in names:
275 275 self.hook('tag', node=hex(node), tag=name, local=local)
276 276 return
277 277
278 278 try:
279 279 fp = self.wfile('.hgtags', 'rb+')
280 280 except IOError:
281 281 fp = self.wfile('.hgtags', 'ab')
282 282 else:
283 283 prevtags = fp.read()
284 284
285 285 # committed tags are stored in UTF-8
286 286 writetags(fp, names, encoding.fromlocal, prevtags)
287 287
288 288 fp.close()
289 289
290 290 if '.hgtags' not in self.dirstate:
291 291 self[None].add(['.hgtags'])
292 292
293 293 m = matchmod.exact(self.root, '', ['.hgtags'])
294 294 tagnode = self.commit(message, user, date, extra=extra, match=m)
295 295
296 296 for name in names:
297 297 self.hook('tag', node=hex(node), tag=name, local=local)
298 298
299 299 return tagnode
300 300
301 301 def tag(self, names, node, message, local, user, date):
302 302 '''tag a revision with one or more symbolic names.
303 303
304 304 names is a list of strings or, when adding a single tag, names may be a
305 305 string.
306 306
307 307 if local is True, the tags are stored in a per-repository file.
308 308 otherwise, they are stored in the .hgtags file, and a new
309 309 changeset is committed with the change.
310 310
311 311 keyword arguments:
312 312
313 313 local: whether to store tags in non-version-controlled file
314 314 (default False)
315 315
316 316 message: commit message to use if committing
317 317
318 318 user: name of user to use if committing
319 319
320 320 date: date tuple to use if committing'''
321 321
322 322 if not local:
323 323 for x in self.status()[:5]:
324 324 if '.hgtags' in x:
325 325 raise util.Abort(_('working copy of .hgtags is changed '
326 326 '(please commit .hgtags manually)'))
327 327
328 328 self.tags() # instantiate the cache
329 329 self._tag(names, node, message, local, user, date)
330 330
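# Illustrative sketch, not part of this change: creating a global tag on
# the working directory's parent (tag name and message are hypothetical):
#
#     node = repo.lookup('.')
#     repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
#              user=None, date=None)
#
# With local=True the tag would be written to .hg/localtags instead of
# committing a change to .hgtags.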
331 331 def tags(self):
332 332 '''return a mapping of tag to node'''
333 333 if self._tags is None:
334 334 (self._tags, self._tagtypes) = self._findtags()
335 335
336 336 return self._tags
337 337
338 338 def _findtags(self):
339 339 '''Do the hard work of finding tags. Return a pair of dicts
340 340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 341 maps tag name to a string like \'global\' or \'local\'.
342 342 Subclasses or extensions are free to add their own tags, but
343 343 should be aware that the returned dicts will be retained for the
344 344 duration of the localrepo object.'''
345 345
346 346 # XXX what tagtype should subclasses/extensions use? Currently
347 347 # mq and bookmarks add tags, but do not set the tagtype at all.
348 348 # Should each extension invent its own tag type? Should there
349 349 # be one tagtype for all such "virtual" tags? Or is the status
350 350 # quo fine?
351 351
352 352 alltags = {} # map tag name to (node, hist)
353 353 tagtypes = {}
354 354
355 355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357 357
358 358 # Build the return dicts. Have to re-encode tag names because
359 359 # the tags module always uses UTF-8 (in order not to lose info
360 360 # writing to the cache), but the rest of Mercurial wants them in
361 361 # local encoding.
362 362 tags = {}
363 363 for (name, (node, hist)) in alltags.iteritems():
364 364 if node != nullid:
365 365 try:
366 366 # ignore tags to unknown nodes
367 367 self.changelog.lookup(node)
368 368 tags[encoding.tolocal(name)] = node
369 369 except error.LookupError:
370 370 pass
371 371 tags['tip'] = self.changelog.tip()
372 372 tagtypes = dict([(encoding.tolocal(name), value)
373 373 for (name, value) in tagtypes.iteritems()])
374 374 return (tags, tagtypes)
375 375
376 376 def tagtype(self, tagname):
377 377 '''
378 378 return the type of the given tag. result can be:
379 379
380 380 'local' : a local tag
381 381 'global' : a global tag
382 382 None : tag does not exist
383 383 '''
384 384
385 385 self.tags()
386 386
387 387 return self._tagtypes.get(tagname)
388 388
389 389 def tagslist(self):
390 390 '''return a list of tags ordered by revision'''
391 391 l = []
392 392 for t, n in self.tags().iteritems():
393 393 r = self.changelog.rev(n)
394 394 l.append((r, t, n))
395 395 return [(t, n) for r, t, n in sorted(l)]
396 396
397 397 def nodetags(self, node):
398 398 '''return the tags associated with a node'''
399 399 if not self.nodetagscache:
400 400 self.nodetagscache = {}
401 401 for t, n in self.tags().iteritems():
402 402 self.nodetagscache.setdefault(n, []).append(t)
403 403 for tags in self.nodetagscache.itervalues():
404 404 tags.sort()
405 405 return self.nodetagscache.get(node, [])
406 406
407 407 def nodebookmarks(self, node):
408 408 marks = []
409 409 for bookmark, n in self._bookmarks.iteritems():
410 410 if n == node:
411 411 marks.append(bookmark)
412 412 return sorted(marks)
413 413
414 414 def _branchtags(self, partial, lrev):
415 415 # TODO: rename this function?
416 416 tiprev = len(self) - 1
417 417 if lrev != tiprev:
418 418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
419 419 self._updatebranchcache(partial, ctxgen)
420 420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
421 421
422 422 return partial
423 423
424 424 def updatebranchcache(self):
425 425 tip = self.changelog.tip()
426 426 if self._branchcache is not None and self._branchcachetip == tip:
427 427 return self._branchcache
428 428
429 429 oldtip = self._branchcachetip
430 430 self._branchcachetip = tip
431 431 if oldtip is None or oldtip not in self.changelog.nodemap:
432 432 partial, last, lrev = self._readbranchcache()
433 433 else:
434 434 lrev = self.changelog.rev(oldtip)
435 435 partial = self._branchcache
436 436
437 437 self._branchtags(partial, lrev)
438 438 # this private cache holds all heads (not just tips)
439 439 self._branchcache = partial
440 440
441 441 def branchmap(self):
442 442 '''returns a dictionary {branch: [branchheads]}'''
443 443 self.updatebranchcache()
444 444 return self._branchcache
445 445
446 446 def branchtags(self):
447 447 '''return a dict where branch names map to the tipmost head of
448 448 the branch, open heads come before closed'''
449 449 bt = {}
450 450 for bn, heads in self.branchmap().iteritems():
451 451 tip = heads[-1]
452 452 for h in reversed(heads):
453 453 if 'close' not in self.changelog.read(h)[5]:
454 454 tip = h
455 455 break
456 456 bt[bn] = tip
457 457 return bt
458 458
459 459 def _readbranchcache(self):
460 460 partial = {}
461 461 try:
462 462 f = self.opener("cache/branchheads")
463 463 lines = f.read().split('\n')
464 464 f.close()
465 465 except (IOError, OSError):
466 466 return {}, nullid, nullrev
467 467
468 468 try:
469 469 last, lrev = lines.pop(0).split(" ", 1)
470 470 last, lrev = bin(last), int(lrev)
471 471 if lrev >= len(self) or self[lrev].node() != last:
472 472 # invalidate the cache
473 473 raise ValueError('invalidating branch cache (tip differs)')
474 474 for l in lines:
475 475 if not l:
476 476 continue
477 477 node, label = l.split(" ", 1)
478 478 label = encoding.tolocal(label.strip())
479 479 partial.setdefault(label, []).append(bin(node))
480 480 except KeyboardInterrupt:
481 481 raise
482 482 except Exception, inst:
483 483 if self.ui.debugflag:
484 484 self.ui.warn(str(inst), '\n')
485 485 partial, last, lrev = {}, nullid, nullrev
486 486 return partial, last, lrev
487 487
488 488 def _writebranchcache(self, branches, tip, tiprev):
489 489 try:
490 490 f = self.opener("cache/branchheads", "w", atomictemp=True)
491 491 f.write("%s %s\n" % (hex(tip), tiprev))
492 492 for label, nodes in branches.iteritems():
493 493 for node in nodes:
494 494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
495 495 f.rename()
496 496 except (IOError, OSError):
497 497 pass
498 498
499 499 def _updatebranchcache(self, partial, ctxgen):
500 500 # collect new branch entries
501 501 newbranches = {}
502 502 for c in ctxgen:
503 503 newbranches.setdefault(c.branch(), []).append(c.node())
504 504 # if older branchheads are reachable from new ones, they aren't
505 505 # really branchheads. Note checking parents is insufficient:
506 506 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
507 507 for branch, newnodes in newbranches.iteritems():
508 508 bheads = partial.setdefault(branch, [])
509 509 bheads.extend(newnodes)
510 510 if len(bheads) <= 1:
511 511 continue
512 512 # starting from tip means fewer passes over reachable
513 513 while newnodes:
514 514 latest = newnodes.pop()
515 515 if latest not in bheads:
516 516 continue
517 517 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
518 518 reachable = self.changelog.reachable(latest, minbhrev)
519 519 reachable.remove(latest)
520 520 bheads = [b for b in bheads if b not in reachable]
521 521 partial[branch] = bheads
522 522
523 523 def lookup(self, key):
524 524 if isinstance(key, int):
525 525 return self.changelog.node(key)
526 526 elif key == '.':
527 527 return self.dirstate.p1()
528 528 elif key == 'null':
529 529 return nullid
530 530 elif key == 'tip':
531 531 return self.changelog.tip()
532 532 n = self.changelog._match(key)
533 533 if n:
534 534 return n
535 535 if key in self._bookmarks:
536 536 return self._bookmarks[key]
537 537 if key in self.tags():
538 538 return self.tags()[key]
539 539 if key in self.branchtags():
540 540 return self.branchtags()[key]
541 541 n = self.changelog._partialmatch(key)
542 542 if n:
543 543 return n
544 544
545 545 # can't find key, check if it might have come from damaged dirstate
546 546 if key in self.dirstate.parents():
547 547 raise error.Abort(_("working directory has unknown parent '%s'!")
548 548 % short(key))
549 549 try:
550 550 if len(key) == 20:
551 551 key = hex(key)
552 552 except:
553 553 pass
554 554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
555 555
556 556 def lookupbranch(self, key, remote=None):
557 557 repo = remote or self
558 558 if key in repo.branchmap():
559 559 return key
560 560
561 561 repo = (remote and remote.local()) and remote or self
562 562 return repo[key].branch()
563 563
564 564 def known(self, nodes):
565 565 nm = self.changelog.nodemap
566 566 return [(n in nm) for n in nodes]
567 567
568 568 def local(self):
569 569 return True
570 570
571 571 def join(self, f):
572 572 return os.path.join(self.path, f)
573 573
574 574 def wjoin(self, f):
575 575 return os.path.join(self.root, f)
576 576
577 577 def file(self, f):
578 578 if f[0] == '/':
579 579 f = f[1:]
580 580 return filelog.filelog(self.sopener, f)
581 581
582 582 def changectx(self, changeid):
583 583 return self[changeid]
584 584
585 585 def parents(self, changeid=None):
586 586 '''get list of changectxs for parents of changeid'''
587 587 return self[changeid].parents()
588 588
589 589 def filectx(self, path, changeid=None, fileid=None):
590 590 """changeid can be a changeset revision, node, or tag.
591 591 fileid can be a file revision or node."""
592 592 return context.filectx(self, path, changeid, fileid)
593 593
594 594 def getcwd(self):
595 595 return self.dirstate.getcwd()
596 596
597 597 def pathto(self, f, cwd=None):
598 598 return self.dirstate.pathto(f, cwd)
599 599
600 600 def wfile(self, f, mode='r'):
601 601 return self.wopener(f, mode)
602 602
603 603 def _link(self, f):
604 604 return os.path.islink(self.wjoin(f))
605 605
606 606 def _loadfilter(self, filter):
607 607 if filter not in self.filterpats:
608 608 l = []
609 609 for pat, cmd in self.ui.configitems(filter):
610 610 if cmd == '!':
611 611 continue
612 612 mf = matchmod.match(self.root, '', [pat])
613 613 fn = None
614 614 params = cmd
615 615 for name, filterfn in self._datafilters.iteritems():
616 616 if cmd.startswith(name):
617 617 fn = filterfn
618 618 params = cmd[len(name):].lstrip()
619 619 break
620 620 if not fn:
621 621 fn = lambda s, c, **kwargs: util.filter(s, c)
622 622 # Wrap old filters not supporting keyword arguments
623 623 if not inspect.getargspec(fn)[2]:
624 624 oldfn = fn
625 625 fn = lambda s, c, **kwargs: oldfn(s, c)
626 626 l.append((mf, fn, params))
627 627 self.filterpats[filter] = l
628 628 return self.filterpats[filter]
629 629
630 630 def _filter(self, filterpats, filename, data):
631 631 for mf, fn, cmd in filterpats:
632 632 if mf(filename):
633 633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
634 634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
635 635 break
636 636
637 637 return data
638 638
639 639 @propertycache
640 640 def _encodefilterpats(self):
641 641 return self._loadfilter('encode')
642 642
643 643 @propertycache
644 644 def _decodefilterpats(self):
645 645 return self._loadfilter('decode')
646 646
647 647 def adddatafilter(self, name, filter):
648 648 self._datafilters[name] = filter
649 649
650 650 def wread(self, filename):
651 651 if self._link(filename):
652 652 data = os.readlink(self.wjoin(filename))
653 653 else:
654 654 data = self.wopener(filename, 'r').read()
655 655 return self._filter(self._encodefilterpats, filename, data)
656 656
657 657 def wwrite(self, filename, data, flags):
658 658 data = self._filter(self._decodefilterpats, filename, data)
659 659 if 'l' in flags:
660 660 self.wopener.symlink(data, filename)
661 661 else:
662 662 self.wopener(filename, 'w').write(data)
663 663 if 'x' in flags:
664 664 util.set_flags(self.wjoin(filename), False, True)
665 665
666 666 def wwritedata(self, filename, data):
667 667 return self._filter(self._decodefilterpats, filename, data)
668 668
669 669 def transaction(self, desc):
670 670 tr = self._transref and self._transref() or None
671 671 if tr and tr.running():
672 672 return tr.nest()
673 673
674 674 # abort here if the journal already exists
675 675 if os.path.exists(self.sjoin("journal")):
676 676 raise error.RepoError(
677 677 _("abandoned transaction found - run hg recover"))
678 678
679 679 # save dirstate for rollback
680 680 try:
681 681 ds = self.opener("dirstate").read()
682 682 except IOError:
683 683 ds = ""
684 684 self.opener("journal.dirstate", "w").write(ds)
685 685 self.opener("journal.branch", "w").write(
686 686 encoding.fromlocal(self.dirstate.branch()))
687 687 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
688 688
689 689 renames = [(self.sjoin("journal"), self.sjoin("undo")),
690 690 (self.join("journal.dirstate"), self.join("undo.dirstate")),
691 691 (self.join("journal.branch"), self.join("undo.branch")),
692 692 (self.join("journal.desc"), self.join("undo.desc"))]
693 693 tr = transaction.transaction(self.ui.warn, self.sopener,
694 694 self.sjoin("journal"),
695 695 aftertrans(renames),
696 696 self.store.createmode)
697 697 self._transref = weakref.ref(tr)
698 698 return tr
699 699
700 700 def recover(self):
701 701 lock = self.lock()
702 702 try:
703 703 if os.path.exists(self.sjoin("journal")):
704 704 self.ui.status(_("rolling back interrupted transaction\n"))
705 705 transaction.rollback(self.sopener, self.sjoin("journal"),
706 706 self.ui.warn)
707 707 self.invalidate()
708 708 return True
709 709 else:
710 710 self.ui.warn(_("no interrupted transaction available\n"))
711 711 return False
712 712 finally:
713 713 lock.release()
714 714
715 715 def rollback(self, dryrun=False):
716 716 wlock = lock = None
717 717 try:
718 718 wlock = self.wlock()
719 719 lock = self.lock()
720 720 if os.path.exists(self.sjoin("undo")):
721 721 try:
722 722 args = self.opener("undo.desc", "r").read().splitlines()
723 723 if len(args) >= 3 and self.ui.verbose:
724 724 desc = _("repository tip rolled back to revision %s"
725 725 " (undo %s: %s)\n") % (
726 726 int(args[0]) - 1, args[1], args[2])
727 727 elif len(args) >= 2:
728 728 desc = _("repository tip rolled back to revision %s"
729 729 " (undo %s)\n") % (
730 730 int(args[0]) - 1, args[1])
731 731 except IOError:
732 732 desc = _("rolling back unknown transaction\n")
733 733 self.ui.status(desc)
734 734 if dryrun:
735 735 return
736 736 transaction.rollback(self.sopener, self.sjoin("undo"),
737 737 self.ui.warn)
738 738 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
739 739 if os.path.exists(self.join('undo.bookmarks')):
740 740 util.rename(self.join('undo.bookmarks'),
741 741 self.join('bookmarks'))
742 742 try:
743 743 branch = self.opener("undo.branch").read()
744 744 self.dirstate.setbranch(branch)
745 745 except IOError:
746 746 self.ui.warn(_("Named branch could not be reset, "
747 747 "current branch still is: %s\n")
748 748 % self.dirstate.branch())
749 749 self.invalidate()
750 750 self.dirstate.invalidate()
751 751 self.destroyed()
752 752 parents = tuple([p.rev() for p in self.parents()])
753 753 if len(parents) > 1:
754 754 self.ui.status(_("working directory now based on "
755 755 "revisions %d and %d\n") % parents)
756 756 else:
757 757 self.ui.status(_("working directory now based on "
758 758 "revision %d\n") % parents)
759 759 else:
760 760 self.ui.warn(_("no rollback information available\n"))
761 761 return 1
762 762 finally:
763 763 release(lock, wlock)
764 764
765 765 def invalidatecaches(self):
766 766 self._tags = None
767 767 self._tagtypes = None
768 768 self.nodetagscache = None
769 769 self._branchcache = None # in UTF-8
770 770 self._branchcachetip = None
771 771
772 772 def invalidate(self):
773 773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
774 774 if a in self.__dict__:
775 775 delattr(self, a)
776 776 self.invalidatecaches()
777 777
778 778 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
779 779 try:
780 780 l = lock.lock(lockname, 0, releasefn, desc=desc)
781 781 except error.LockHeld, inst:
782 782 if not wait:
783 783 raise
784 784 self.ui.warn(_("waiting for lock on %s held by %r\n") %
785 785 (desc, inst.locker))
786 786 # default to 600 seconds timeout
787 787 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
788 788 releasefn, desc=desc)
789 789 if acquirefn:
790 790 acquirefn()
791 791 return l
792 792
793 793 def lock(self, wait=True):
794 794 '''Lock the repository store (.hg/store) and return a weak reference
795 795 to the lock. Use this before modifying the store (e.g. committing or
796 796 stripping). If you are opening a transaction, get a lock as well.'''
797 797 l = self._lockref and self._lockref()
798 798 if l is not None and l.held:
799 799 l.lock()
800 800 return l
801 801
802 802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
803 803 self.invalidate, _('repository %s') % self.origroot)
804 804 self._lockref = weakref.ref(l)
805 805 return l
806 806
807 807 def wlock(self, wait=True):
808 808 '''Lock the non-store parts of the repository (everything under
809 809 .hg except .hg/store) and return a weak reference to the lock.
810 810 Use this before modifying files in .hg.'''
811 811 l = self._wlockref and self._wlockref()
812 812 if l is not None and l.held:
813 813 l.lock()
814 814 return l
815 815
816 816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
817 817 self.dirstate.invalidate, _('working directory of %s') %
818 818 self.origroot)
819 819 self._wlockref = weakref.ref(l)
820 820 return l
821 821
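# Illustrative sketch, not part of this change: the locking discipline
# the two docstrings above describe, acquiring wlock before lock and
# lock before opening a transaction, as rollback() above does:
#
#     wlock = repo.wlock()
#     try:
#         lock = repo.lock()
#         try:
#             tr = repo.transaction('example')  # desc is hypothetical
#             # ... modify store and dirstate ...
#             tr.close()
#         finally:
#             lock.release()
#     finally:
#         wlock.release()
#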
822 822 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
823 823 """
824 824 commit an individual file as part of a larger transaction
825 825 """
826 826
827 827 fname = fctx.path()
828 828 text = fctx.data()
829 829 flog = self.file(fname)
830 830 fparent1 = manifest1.get(fname, nullid)
831 831 fparent2 = fparent2o = manifest2.get(fname, nullid)
832 832
833 833 meta = {}
834 834 copy = fctx.renamed()
835 835 if copy and copy[0] != fname:
836 836 # Mark the new revision of this file as a copy of another
837 837 # file. This copy data will effectively act as a parent
838 838 # of this new revision. If this is a merge, the first
839 839 # parent will be the nullid (meaning "look up the copy data")
840 840 # and the second one will be the other parent. For example:
841 841 #
842 842 # 0 --- 1 --- 3 rev1 changes file foo
843 843 # \ / rev2 renames foo to bar and changes it
844 844 # \- 2 -/ rev3 should have bar with all changes and
845 845 # should record that bar descends from
846 846 # bar in rev2 and foo in rev1
847 847 #
848 848 # this allows this merge to succeed:
849 849 #
850 850 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
851 851 # \ / merging rev3 and rev4 should use bar@rev2
852 852 # \- 2 --- 4 as the merge base
853 853 #
854 854
855 855 cfname = copy[0]
856 856 crev = manifest1.get(cfname)
857 857 newfparent = fparent2
858 858
859 859 if manifest2: # branch merge
860 860 if fparent2 == nullid or crev is None: # copied on remote side
861 861 if cfname in manifest2:
862 862 crev = manifest2[cfname]
863 863 newfparent = fparent1
864 864
865 865 # find source in nearest ancestor if we've lost track
866 866 if not crev:
867 867 self.ui.debug(" %s: searching for copy revision for %s\n" %
868 868 (fname, cfname))
869 869 for ancestor in self[None].ancestors():
870 870 if cfname in ancestor:
871 871 crev = ancestor[cfname].filenode()
872 872 break
873 873
874 874 if crev:
875 875 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
876 876 meta["copy"] = cfname
877 877 meta["copyrev"] = hex(crev)
878 878 fparent1, fparent2 = nullid, newfparent
879 879 else:
880 880 self.ui.warn(_("warning: can't find ancestor for '%s' "
881 881 "copied from '%s'!\n") % (fname, cfname))
882 882
883 883 elif fparent2 != nullid:
884 884 # is one parent an ancestor of the other?
885 885 fparentancestor = flog.ancestor(fparent1, fparent2)
886 886 if fparentancestor == fparent1:
887 887 fparent1, fparent2 = fparent2, nullid
888 888 elif fparentancestor == fparent2:
889 889 fparent2 = nullid
890 890
891 891 # is the file changed?
892 892 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
893 893 changelist.append(fname)
894 894 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
895 895
896 896 # are just the flags changed during merge?
897 897 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
898 898 changelist.append(fname)
899 899
900 900 return fparent1
901 901
902 902 def commit(self, text="", user=None, date=None, match=None, force=False,
903 903 editor=False, extra={}):
904 904 """Add a new revision to current repository.
905 905
906 906 Revision information is gathered from the working directory,
907 907 match can be used to filter the committed files. If editor is
908 908 supplied, it is called to get a commit message.
909 909 """
910 910
911 911 def fail(f, msg):
912 912 raise util.Abort('%s: %s' % (f, msg))
913 913
914 914 if not match:
915 915 match = matchmod.always(self.root, '')
916 916
917 917 if not force:
918 918 vdirs = []
919 919 match.dir = vdirs.append
920 920 match.bad = fail
921 921
922 922 wlock = self.wlock()
923 923 try:
924 924 wctx = self[None]
925 925 merge = len(wctx.parents()) > 1
926 926
927 927 if (not force and merge and match and
928 928 (match.files() or match.anypats())):
929 929 raise util.Abort(_('cannot partially commit a merge '
930 930 '(do not specify files or patterns)'))
931 931
932 932 changes = self.status(match=match, clean=force)
933 933 if force:
934 934 changes[0].extend(changes[6]) # mq may commit unchanged files
935 935
936 936 # check subrepos
937 937 subs = []
938 938 removedsubs = set()
939 939 for p in wctx.parents():
940 940 removedsubs.update(s for s in p.substate if match(s))
941 941 for s in wctx.substate:
942 942 removedsubs.discard(s)
943 943 if match(s) and wctx.sub(s).dirty():
944 944 subs.append(s)
945 945 if (subs or removedsubs):
946 946 if (not match('.hgsub') and
947 947 '.hgsub' in (wctx.modified() + wctx.added())):
948 948 raise util.Abort(_("can't commit subrepos without .hgsub"))
949 949 if '.hgsubstate' not in changes[0]:
950 950 changes[0].insert(0, '.hgsubstate')
951 951
952 952 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
953 953 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
954 954 if changedsubs:
955 955 raise util.Abort(_("uncommitted changes in subrepo %s")
956 956 % changedsubs[0])
957 957
958 958 # make sure all explicit patterns are matched
959 959 if not force and match.files():
960 960 matched = set(changes[0] + changes[1] + changes[2])
961 961
962 962 for f in match.files():
963 963 if f == '.' or f in matched or f in wctx.substate:
964 964 continue
965 965 if f in changes[3]: # missing
966 966 fail(f, _('file not found!'))
967 967 if f in vdirs: # visited directory
968 968 d = f + '/'
969 969 for mf in matched:
970 970 if mf.startswith(d):
971 971 break
972 972 else:
973 973 fail(f, _("no match under directory!"))
974 974 elif f not in self.dirstate:
975 975 fail(f, _("file not tracked!"))
976 976
977 977 if (not force and not extra.get("close") and not merge
978 978 and not (changes[0] or changes[1] or changes[2])
979 979 and wctx.branch() == wctx.p1().branch()):
980 980 return None
981 981
982 982 ms = mergemod.mergestate(self)
983 983 for f in changes[0]:
984 984 if f in ms and ms[f] == 'u':
985 985 raise util.Abort(_("unresolved merge conflicts "
986 986 "(see hg help resolve)"))
987 987
988 988 cctx = context.workingctx(self, text, user, date, extra, changes)
989 989 if editor:
990 990 cctx._text = editor(self, cctx, subs)
991 991 edited = (text != cctx._text)
992 992
993 993 # commit subs
994 994 if subs or removedsubs:
995 995 state = wctx.substate.copy()
996 996 for s in sorted(subs):
997 997 sub = wctx.sub(s)
998 998 self.ui.status(_('committing subrepository %s\n') %
999 999 subrepo.subrelpath(sub))
1000 1000 sr = sub.commit(cctx._text, user, date)
1001 1001 state[s] = (state[s][0], sr)
1002 1002 subrepo.writestate(self, state)
1003 1003
1004 1004 # Save commit message in case this transaction gets rolled back
1005 1005 # (e.g. by a pretxncommit hook). Leave the content alone on
1006 1006 # the assumption that the user will use the same editor again.
1007 1007 msgfile = self.opener('last-message.txt', 'wb')
1008 1008 msgfile.write(cctx._text)
1009 1009 msgfile.close()
1010 1010
1011 1011 p1, p2 = self.dirstate.parents()
1012 1012 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1013 1013 try:
1014 1014 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1015 1015 ret = self.commitctx(cctx, True)
1016 1016 except:
1017 1017 if edited:
1018 1018 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1019 1019 self.ui.write(
1020 1020 _('note: commit message saved in %s\n') % msgfn)
1021 1021 raise
1022 1022
1023 1023 # update bookmarks, dirstate and mergestate
1024 1024 bookmarks.update(self, p1, ret)
1025 1025 for f in changes[0] + changes[1]:
1026 1026 self.dirstate.normal(f)
1027 1027 for f in changes[2]:
1028 1028 self.dirstate.forget(f)
1029 1029 self.dirstate.setparents(ret)
1030 1030 ms.reset()
1031 1031 finally:
1032 1032 wlock.release()
1033 1033
1034 1034 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1035 1035 return ret
1036 1036
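# Illustrative sketch, not part of this change: commit() gathers changes
# from the working directory; a match object restricts the committed
# files, as _tag() above does for .hgtags (messages are hypothetical):
#
#     repo.commit('fix everything')                    # commit all changes
#     m = matchmod.exact(repo.root, '', ['.hgtags'])
#     repo.commit('update tags', match=m)              # only .hgtags
#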
1037 1037 def commitctx(self, ctx, error=False):
1038 1038 """Add a new revision to current repository.
1039 1039 Revision information is passed via the context argument.
1040 1040 """
1041 1041
1042 1042 tr = lock = None
1043 1043 removed = list(ctx.removed())
1044 1044 p1, p2 = ctx.p1(), ctx.p2()
1045 1045 m1 = p1.manifest().copy()
1046 1046 m2 = p2.manifest()
1047 1047 user = ctx.user()
1048 1048
1049 1049 lock = self.lock()
1050 1050 try:
1051 1051 tr = self.transaction("commit")
1052 1052 trp = weakref.proxy(tr)
1053 1053
1054 1054 # check in files
1055 1055 new = {}
1056 1056 changed = []
1057 1057 linkrev = len(self)
1058 1058 for f in sorted(ctx.modified() + ctx.added()):
1059 1059 self.ui.note(f + "\n")
1060 1060 try:
1061 1061 fctx = ctx[f]
1062 1062 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1063 1063 changed)
1064 1064 m1.set(f, fctx.flags())
1065 1065 except OSError, inst:
1066 1066 self.ui.warn(_("trouble committing %s!\n") % f)
1067 1067 raise
1068 1068 except IOError, inst:
1069 1069 errcode = getattr(inst, 'errno', errno.ENOENT)
1070 1070 if error or errcode and errcode != errno.ENOENT:
1071 1071 self.ui.warn(_("trouble committing %s!\n") % f)
1072 1072 raise
1073 1073 else:
1074 1074 removed.append(f)
1075 1075
1076 1076 # update manifest
1077 1077 m1.update(new)
1078 1078 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1079 1079 drop = [f for f in removed if f in m1]
1080 1080 for f in drop:
1081 1081 del m1[f]
1082 1082 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1083 1083 p2.manifestnode(), (new, drop))
1084 1084
1085 1085 # update changelog
1086 1086 self.changelog.delayupdate()
1087 1087 n = self.changelog.add(mn, changed + removed, ctx.description(),
1088 1088 trp, p1.node(), p2.node(),
1089 1089 user, ctx.date(), ctx.extra().copy())
1090 1090 p = lambda: self.changelog.writepending() and self.root or ""
1091 1091 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1092 1092 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1093 1093 parent2=xp2, pending=p)
1094 1094 self.changelog.finalize(trp)
1095 1095 tr.close()
1096 1096
1097 1097 if self._branchcache:
1098 1098 self.updatebranchcache()
1099 1099 return n
1100 1100 finally:
1101 1101 if tr:
1102 1102 tr.release()
1103 1103 lock.release()
1104 1104
1105 1105 def destroyed(self):
1106 1106 '''Inform the repository that nodes have been destroyed.
1107 1107 Intended for use by strip and rollback, so there's a common
1108 1108 place for anything that has to be done after destroying history.'''
1109 1109 # XXX it might be nice if we could take the list of destroyed
1110 1110 # nodes, but I don't see an easy way for rollback() to do that
1111 1111
1112 1112 # Ensure the persistent tag cache is updated. Doing it now
1113 1113 # means that the tag cache only has to worry about destroyed
1114 1114 # heads immediately after a strip/rollback. That in turn
1115 1115 # guarantees that "cachetip == currenttip" (comparing both rev
1116 1116 # and node) always means no nodes have been added or destroyed.
1117 1117
1118 1118 # XXX this is suboptimal when qrefresh'ing: we strip the current
1119 1119 # head, refresh the tag cache, then immediately add a new head.
1120 1120 # But I think doing it this way is necessary for the "instant
1121 1121 # tag cache retrieval" case to work.
1122 1122 self.invalidatecaches()
1123 1123
1124 1124 def walk(self, match, node=None):
1125 1125 '''
1126 1126 walk recursively through the directory tree or a given
1127 1127 changeset, finding all files matched by the match
1128 1128 function
1129 1129 '''
1130 1130 return self[node].walk(match)
1131 1131
1132 1132 def status(self, node1='.', node2=None, match=None,
1133 1133 ignored=False, clean=False, unknown=False,
1134 1134 listsubrepos=False):
1135 1135 """return status of files between two nodes or node and working directory
1136 1136
1137 1137 If node1 is None, use the first dirstate parent instead.
1138 1138 If node2 is None, compare node1 with working directory.
1139 1139 """
1140 1140
1141 1141 def mfmatches(ctx):
1142 1142 mf = ctx.manifest().copy()
1143 1143 for fn in mf.keys():
1144 1144 if not match(fn):
1145 1145 del mf[fn]
1146 1146 return mf
1147 1147
1148 1148 if isinstance(node1, context.changectx):
1149 1149 ctx1 = node1
1150 1150 else:
1151 1151 ctx1 = self[node1]
1152 1152 if isinstance(node2, context.changectx):
1153 1153 ctx2 = node2
1154 1154 else:
1155 1155 ctx2 = self[node2]
1156 1156
1157 1157 working = ctx2.rev() is None
1158 1158 parentworking = working and ctx1 == self['.']
1159 1159 match = match or matchmod.always(self.root, self.getcwd())
1160 1160 listignored, listclean, listunknown = ignored, clean, unknown
1161 1161
1162 1162 # load earliest manifest first for caching reasons
1163 1163 if not working and ctx2.rev() < ctx1.rev():
1164 1164 ctx2.manifest()
1165 1165
1166 1166 if not parentworking:
1167 1167 def bad(f, msg):
1168 1168 if f not in ctx1:
1169 1169 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1170 1170 match.bad = bad
1171 1171
1172 1172 if working: # we need to scan the working dir
1173 1173 subrepos = []
1174 1174 if '.hgsub' in self.dirstate:
1175 1175 subrepos = ctx1.substate.keys()
1176 1176 s = self.dirstate.status(match, subrepos, listignored,
1177 1177 listclean, listunknown)
1178 1178 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1179 1179
1180 1180 # check for any possibly clean files
1181 1181 if parentworking and cmp:
1182 1182 fixup = []
1183 1183 # do a full compare of any files that might have changed
1184 1184 for f in sorted(cmp):
1185 1185 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1186 1186 or ctx1[f].cmp(ctx2[f])):
1187 1187 modified.append(f)
1188 1188 else:
1189 1189 fixup.append(f)
1190 1190
1191 1191 # update dirstate for files that are actually clean
1192 1192 if fixup:
1193 1193 if listclean:
1194 1194 clean += fixup
1195 1195
1196 1196 try:
1197 1197 # updating the dirstate is optional
1198 1198 # so we don't wait on the lock
1199 1199 wlock = self.wlock(False)
1200 1200 try:
1201 1201 for f in fixup:
1202 1202 self.dirstate.normal(f)
1203 1203 finally:
1204 1204 wlock.release()
1205 1205 except error.LockError:
1206 1206 pass
1207 1207
1208 1208 if not parentworking:
1209 1209 mf1 = mfmatches(ctx1)
1210 1210 if working:
1211 1211 # we are comparing working dir against non-parent
1212 1212 # generate a pseudo-manifest for the working dir
1213 1213 mf2 = mfmatches(self['.'])
1214 1214 for f in cmp + modified + added:
1215 1215 mf2[f] = None
1216 1216 mf2.set(f, ctx2.flags(f))
1217 1217 for f in removed:
1218 1218 if f in mf2:
1219 1219 del mf2[f]
1220 1220 else:
1221 1221 # we are comparing two revisions
1222 1222 deleted, unknown, ignored = [], [], []
1223 1223 mf2 = mfmatches(ctx2)
1224 1224
1225 1225 modified, added, clean = [], [], []
1226 1226 for fn in mf2:
1227 1227 if fn in mf1:
1228 if (mf1.flags(fn) != mf2.flags(fn) or
1229 (mf1[fn] != mf2[fn] and
1230 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1228 if (fn not in deleted and
1229 (mf1.flags(fn) != mf2.flags(fn) or
1230 (mf1[fn] != mf2[fn] and
1231 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1231 1232 modified.append(fn)
1232 1233 elif listclean:
1233 1234 clean.append(fn)
1234 1235 del mf1[fn]
1235 else:
1236 elif fn not in deleted:
1236 1237 added.append(fn)
1237 1238 removed = mf1.keys()
1238 1239
1239 1240 r = modified, added, removed, deleted, unknown, ignored, clean
1240 1241
1241 1242 if listsubrepos:
1242 1243 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1243 1244 if working:
1244 1245 rev2 = None
1245 1246 else:
1246 1247 rev2 = ctx2.substate[subpath][1]
1247 1248 try:
1248 1249 submatch = matchmod.narrowmatcher(subpath, match)
1249 1250 s = sub.status(rev2, match=submatch, ignored=listignored,
1250 1251 clean=listclean, unknown=listunknown,
1251 1252 listsubrepos=True)
1252 1253 for rfiles, sfiles in zip(r, s):
1253 1254 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1254 1255 except error.LookupError:
1255 1256 self.ui.status(_("skipping missing subrepository: %s\n")
1256 1257 % subpath)
1257 1258
1258 1259 for l in r:
1259 1260 l.sort()
1260 1261 return r
1261 1262
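# Illustrative sketch, not part of this change: status() returns a
# 7-tuple, and the "fn not in deleted" guards added above keep a file
# removed with plain rm (but still tracked) out of modified/added when
# ctx2 is the working directory and ctx1 is not its parent:
#
#     st = repo.status(node1=0, node2=None)   # rev 0 vs. working dir
#     modified, added, removed, deleted, unknown, ignored, clean = st
#
# This is exactly what the new test exercises via "hg diff -r 0".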
1262 1263 def heads(self, start=None):
1263 1264 heads = self.changelog.heads(start)
1264 1265 # sort the output in rev descending order
1265 1266 return sorted(heads, key=self.changelog.rev, reverse=True)
1266 1267
1267 1268 def branchheads(self, branch=None, start=None, closed=False):
1268 1269 '''return a (possibly filtered) list of heads for the given branch
1269 1270
1270 1271 Heads are returned in topological order, from newest to oldest.
1271 1272 If branch is None, use the dirstate branch.
1272 1273 If start is not None, return only heads reachable from start.
1273 1274 If closed is True, return heads that are marked as closed as well.
1274 1275 '''
1275 1276 if branch is None:
1276 1277 branch = self[None].branch()
1277 1278 branches = self.branchmap()
1278 1279 if branch not in branches:
1279 1280 return []
1280 1281 # the cache returns heads ordered lowest to highest
1281 1282 bheads = list(reversed(branches[branch]))
1282 1283 if start is not None:
1283 1284 # filter out the heads that cannot be reached from startrev
1284 1285 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1285 1286 bheads = [h for h in bheads if h in fbheads]
1286 1287 if not closed:
1287 1288 bheads = [h for h in bheads if
1288 1289 ('close' not in self.changelog.read(h)[5])]
1289 1290 return bheads
1290 1291
1291 1292 def branches(self, nodes):
1292 1293 if not nodes:
1293 1294 nodes = [self.changelog.tip()]
1294 1295 b = []
1295 1296 for n in nodes:
1296 1297 t = n
1297 1298 while 1:
1298 1299 p = self.changelog.parents(n)
1299 1300 if p[1] != nullid or p[0] == nullid:
1300 1301 b.append((t, n, p[0], p[1]))
1301 1302 break
1302 1303 n = p[0]
1303 1304 return b
1304 1305
1305 1306 def between(self, pairs):
1306 1307 r = []
1307 1308
1308 1309 for top, bottom in pairs:
1309 1310 n, l, i = top, [], 0
1310 1311 f = 1
1311 1312
1312 1313 while n != bottom and n != nullid:
1313 1314 p = self.changelog.parents(n)[0]
1314 1315 if i == f:
1315 1316 l.append(n)
1316 1317 f = f * 2
1317 1318 n = p
1318 1319 i += 1
1319 1320
1320 1321 r.append(l)
1321 1322
1322 1323 return r
1323 1324
1324 1325 def pull(self, remote, heads=None, force=False):
1325 1326 lock = self.lock()
1326 1327 try:
1327 1328 usecommon = remote.capable('getbundle')
1328 1329 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1329 1330 force=force, commononly=usecommon)
1330 1331 common, fetch, rheads = tmp
1331 1332 if not fetch:
1332 1333 self.ui.status(_("no changes found\n"))
1333 1334 result = 0
1334 1335 else:
1335 1336 if heads is None and list(common) == [nullid]:
1336 1337 self.ui.status(_("requesting all changes\n"))
1337 1338 elif heads is None and remote.capable('changegroupsubset'):
1338 1339 # issue1320, avoid a race if remote changed after discovery
1339 1340 heads = rheads
1340 1341
1341 1342 if usecommon:
1342 1343 cg = remote.getbundle('pull', common=common,
1343 1344 heads=heads or rheads)
1344 1345 elif heads is None:
1345 1346 cg = remote.changegroup(fetch, 'pull')
1346 1347 elif not remote.capable('changegroupsubset'):
1347 1348 raise util.Abort(_("partial pull cannot be done because "
1348 1349 "other repository doesn't support "
1349 1350 "changegroupsubset."))
1350 1351 else:
1351 1352 cg = remote.changegroupsubset(fetch, heads, 'pull')
1352 1353 result = self.addchangegroup(cg, 'pull', remote.url(),
1353 1354 lock=lock)
1354 1355 finally:
1355 1356 lock.release()
1356 1357
1357 1358 return result
1358 1359
1359 1360 def checkpush(self, force, revs):
1360 1361 """Extensions can override this function if additional checks have
1361 1362 to be performed before pushing, or call it if they override push
1362 1363 command.
1363 1364 """
1364 1365 pass
1365 1366
1366 1367 def push(self, remote, force=False, revs=None, newbranch=False):
1367 1368 '''Push outgoing changesets (limited by revs) from the current
1368 1369 repository to remote. Return an integer:
1369 1370 - 0 means HTTP error *or* nothing to push
1370 1371 - 1 means we pushed and remote head count is unchanged *or*
1371 1372 we have outgoing changesets but refused to push
1372 1373 - other values as described by addchangegroup()
1373 1374 '''
1374 1375 # there are two ways to push to remote repo:
1375 1376 #
1376 1377 # addchangegroup assumes local user can lock remote
1377 1378 # repo (local filesystem, old ssh servers).
1378 1379 #
1379 1380 # unbundle assumes local user cannot lock remote repo (new ssh
1380 1381 # servers, http servers).
1381 1382
1382 1383 self.checkpush(force, revs)
1383 1384 lock = None
1384 1385 unbundle = remote.capable('unbundle')
1385 1386 if not unbundle:
1386 1387 lock = remote.lock()
1387 1388 try:
1388 1389 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1389 1390 newbranch)
1390 1391 ret = remote_heads
1391 1392 if cg is not None:
1392 1393 if unbundle:
1393 1394 # local repo finds heads on server, finds out what
1394 1395 # revs it must push. once revs transferred, if server
1395 1396 # finds it has different heads (someone else won
1396 1397 # commit/push race), server aborts.
1397 1398 if force:
1398 1399 remote_heads = ['force']
1399 1400 # ssh: return remote's addchangegroup()
1400 1401 # http: return remote's addchangegroup() or 0 for error
1401 1402 ret = remote.unbundle(cg, remote_heads, 'push')
1402 1403 else:
1403 1404 # we return an integer indicating remote head count change
1404 1405 ret = remote.addchangegroup(cg, 'push', self.url(),
1405 1406 lock=lock)
1406 1407 finally:
1407 1408 if lock is not None:
1408 1409 lock.release()
1409 1410
1410 1411 self.ui.debug("checking for updated bookmarks\n")
1411 1412 rb = remote.listkeys('bookmarks')
1412 1413 for k in rb.keys():
1413 1414 if k in self._bookmarks:
1414 1415 nr, nl = rb[k], hex(self._bookmarks[k])
1415 1416 if nr in self:
1416 1417 cr = self[nr]
1417 1418 cl = self[nl]
1418 1419 if cl in cr.descendants():
1419 1420 r = remote.pushkey('bookmarks', k, nr, nl)
1420 1421 if r:
1421 1422 self.ui.status(_("updating bookmark %s\n") % k)
1422 1423 else:
1423 1424 self.ui.warn(_('updating bookmark %s'
1424 1425 ' failed!\n') % k)
1425 1426
1426 1427 return ret
1427 1428
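# Illustrative sketch, not part of this change: pushing to a peer opened
# with mercurial.hg.repository (URL hypothetical); the integer result
# follows the convention in the docstring above:
#
#     from mercurial import hg
#     remote = hg.repository(repo.ui, 'ssh://example.org/repo')
#     ret = repo.push(remote, force=False, newbranch=False)
#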
1428 1429 def changegroupinfo(self, nodes, source):
1429 1430 if self.ui.verbose or source == 'bundle':
1430 1431 self.ui.status(_("%d changesets found\n") % len(nodes))
1431 1432 if self.ui.debugflag:
1432 1433 self.ui.debug("list of changesets:\n")
1433 1434 for node in nodes:
1434 1435 self.ui.debug("%s\n" % hex(node))
1435 1436
1436 1437 def changegroupsubset(self, bases, heads, source):
1437 1438 """Compute a changegroup consisting of all the nodes that are
1438 1439 descendants of any of the bases and ancestors of any of the heads.
1439 1440 Return a chunkbuffer object whose read() method will return
1440 1441 successive changegroup chunks.
1441 1442
1442 1443 It is fairly complex as determining which filenodes and which
1443 1444 manifest nodes need to be included for the changeset to be complete
1444 1445 is non-trivial.
1445 1446
1446 1447 Another wrinkle is doing the reverse, figuring out which changeset in
1447 1448 the changegroup a particular filenode or manifestnode belongs to.
1448 1449 """
1449 1450 cl = self.changelog
1450 1451 if not bases:
1451 1452 bases = [nullid]
1452 1453 csets, bases, heads = cl.nodesbetween(bases, heads)
1453 1454 # We assume that all ancestors of bases are known
1454 1455 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1455 1456 return self._changegroupsubset(common, csets, heads, source)
1456 1457
1457 1458 def getbundle(self, source, heads=None, common=None):
1458 1459 """Like changegroupsubset, but returns the set difference between the
1459 1460 ancestors of heads and the ancestors common.
1460 1461
1461 1462 If heads is None, use the local heads. If common is None, use [nullid].
1462 1463
1463 1464 The nodes in common might not all be known locally due to the way the
1464 1465 current discovery protocol works.
1465 1466 """
1466 1467 cl = self.changelog
1467 1468 if common:
1468 1469 nm = cl.nodemap
1469 1470 common = [n for n in common if n in nm]
1470 1471 else:
1471 1472 common = [nullid]
1472 1473 if not heads:
1473 1474 heads = cl.heads()
1474 1475 common, missing = cl.findcommonmissing(common, heads)
1475 1476 return self._changegroupsubset(common, missing, heads, source)
1476 1477
1477 1478 def _changegroupsubset(self, commonrevs, csets, heads, source):
1478 1479
1479 1480 cl = self.changelog
1480 1481 mf = self.manifest
1481 1482 mfs = {} # needed manifests
1482 1483 fnodes = {} # needed file nodes
1483 1484 changedfiles = set()
1484 1485 fstate = ['', {}]
1485 1486 count = [0]
1486 1487
1487 1488 # can we go through the fast path ?
1488 1489 heads.sort()
1489 1490 if heads == sorted(self.heads()):
1490 1491 return self._changegroup(csets, source)
1491 1492
1492 1493 # slow path
1493 1494 self.hook('preoutgoing', throw=True, source=source)
1494 1495 self.changegroupinfo(csets, source)
1495 1496
1496 1497 # filter any nodes that claim to be part of the known set
1497 1498 def prune(revlog, missing):
1498 1499 for n in missing:
1499 1500 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1500 1501 yield n
1501 1502
1502 1503 def lookup(revlog, x):
1503 1504 if revlog == cl:
1504 1505 c = cl.read(x)
1505 1506 changedfiles.update(c[3])
1506 1507 mfs.setdefault(c[0], x)
1507 1508 count[0] += 1
1508 1509 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1509 1510 return x
1510 1511 elif revlog == mf:
1511 1512 clnode = mfs[x]
1512 1513 mdata = mf.readfast(x)
1513 1514 for f in changedfiles:
1514 1515 if f in mdata:
1515 1516 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1516 1517 count[0] += 1
1517 1518 self.ui.progress(_('bundling'), count[0],
1518 1519 unit=_('manifests'), total=len(mfs))
1519 1520 return mfs[x]
1520 1521 else:
1521 1522 self.ui.progress(
1522 1523 _('bundling'), count[0], item=fstate[0],
1523 1524 unit=_('files'), total=len(changedfiles))
1524 1525 return fstate[1][x]
1525 1526
1526 1527 bundler = changegroup.bundle10(lookup)
1527 1528
1528 1529 def gengroup():
1529 1530 # Create a changenode group generator that will call our functions
1530 1531 # back to lookup the owning changenode and collect information.
1531 1532 for chunk in cl.group(csets, bundler):
1532 1533 yield chunk
1533 1534 self.ui.progress(_('bundling'), None)
1534 1535
1535 1536 # Create a generator for the manifestnodes that calls our lookup
1536 1537 # and data collection functions back.
1537 1538 count[0] = 0
1538 1539 for chunk in mf.group(prune(mf, mfs), bundler):
1539 1540 yield chunk
1540 1541 self.ui.progress(_('bundling'), None)
1541 1542
1542 1543 mfs.clear()
1543 1544
1544 1545 # Go through all our files in order sorted by name.
1545 1546 count[0] = 0
1546 1547 for fname in sorted(changedfiles):
1547 1548 filerevlog = self.file(fname)
1548 1549 if not len(filerevlog):
1549 1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1550 1551 fstate[0] = fname
1551 1552 fstate[1] = fnodes.pop(fname, {})
1552 1553 first = True
1553 1554
1554 1555 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1555 1556 bundler):
1556 1557 if first:
1557 1558 if chunk == bundler.close():
1558 1559 break
1559 1560 count[0] += 1
1560 1561 yield bundler.fileheader(fname)
1561 1562 first = False
1562 1563 yield chunk
1563 1564 # Signal that no more groups are left.
1564 1565 yield bundler.close()
1565 1566 self.ui.progress(_('bundling'), None)
1566 1567
1567 1568 if csets:
1568 1569 self.hook('outgoing', node=hex(csets[0]), source=source)
1569 1570
1570 1571 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1571 1572
1572 1573 def changegroup(self, basenodes, source):
1573 1574 # to avoid a race we use changegroupsubset() (issue1320)
1574 1575 return self.changegroupsubset(basenodes, self.heads(), source)
1575 1576
1576 1577 def _changegroup(self, nodes, source):
1577 1578 """Compute the changegroup of all nodes that we have that a recipient
1578 1579 doesn't. Return a chunkbuffer object whose read() method will return
1579 1580 successive changegroup chunks.
1580 1581
1581 1582 This is much easier than the previous function as we can assume that
1582 1583 the recipient has any changenode we aren't sending them.
1583 1584
1584 1585 nodes is the set of nodes to send"""
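        # Illustrative only (not part of this change): a caller might drain
        # the returned bundle incrementally, e.g.
        #   cg = repo._changegroup(nodes, 'push')
        #   while True:
        #       chunk = cg.read(4096)
        #       if not chunk:
        #           break
        #       out.write(chunk)   # 'out' is any writable file object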
1585 1586
1586 1587 cl = self.changelog
1587 1588 mf = self.manifest
1588 1589 mfs = {}
1589 1590 changedfiles = set()
1590 1591 fstate = ['']
1591 1592 count = [0]
1592 1593
1593 1594 self.hook('preoutgoing', throw=True, source=source)
1594 1595 self.changegroupinfo(nodes, source)
1595 1596
1596 1597 revset = set([cl.rev(n) for n in nodes])
1597 1598
1598 1599 def gennodelst(log):
1599 1600 for r in log:
1600 1601 if log.linkrev(r) in revset:
1601 1602 yield log.node(r)
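        # (gennodelst yields, in revlog order, exactly those nodes whose
        # linkrev falls inside the outgoing revision set)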
1602 1603
1603 1604 def lookup(revlog, x):
1604 1605 if revlog == cl:
1605 1606 c = cl.read(x)
1606 1607 changedfiles.update(c[3])
1607 1608 mfs.setdefault(c[0], x)
1608 1609 count[0] += 1
1609 1610 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1610 1611 return x
1611 1612 elif revlog == mf:
1612 1613 count[0] += 1
1613 1614 self.ui.progress(_('bundling'), count[0],
1614 1615 unit=_('manifests'), total=len(mfs))
1615 1616 return cl.node(revlog.linkrev(revlog.rev(x)))
1616 1617 else:
1617 1618 self.ui.progress(
1618 1619 _('bundling'), count[0], item=fstate[0],
1619 1620 total=len(changedfiles), unit=_('files'))
1620 1621 return cl.node(revlog.linkrev(revlog.rev(x)))
1621 1622
1622 1623 bundler = changegroup.bundle10(lookup)
1623 1624
1624 1625 def gengroup():
1625 1626 '''yield a sequence of changegroup chunks (strings)'''
1626 1627 # construct a list of all changed files
1627 1628
1628 1629 for chunk in cl.group(nodes, bundler):
1629 1630 yield chunk
1630 1631 self.ui.progress(_('bundling'), None)
1631 1632
1632 1633 count[0] = 0
1633 1634 for chunk in mf.group(gennodelst(mf), bundler):
1634 1635 yield chunk
1635 1636 self.ui.progress(_('bundling'), None)
1636 1637
1637 1638 count[0] = 0
1638 1639 for fname in sorted(changedfiles):
1639 1640 filerevlog = self.file(fname)
1640 1641 if not len(filerevlog):
1641 1642 raise util.Abort(_("empty or missing revlog for %s") % fname)
1642 1643 fstate[0] = fname
1643 1644 first = True
1644 1645 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1645 1646 if first:
1646 1647 if chunk == bundler.close():
1647 1648 break
1648 1649 count[0] += 1
1649 1650 yield bundler.fileheader(fname)
1650 1651 first = False
1651 1652 yield chunk
1652 1653 yield bundler.close()
1653 1654 self.ui.progress(_('bundling'), None)
1654 1655
1655 1656 if nodes:
1656 1657 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657 1658
1658 1659 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659 1660
1660 1661 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 1662 """Add the changegroup returned by source.read() to this repo.
1662 1663 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 1664 the URL of the repo where this changegroup is coming from.
1664 1665 If lock is not None, the function takes ownership of the lock
1665 1666 and releases it after the changegroup is added.
1666 1667
1667 1668 Return an integer summarizing the change to this repo:
1668 1669 - nothing changed or no source: 0
1669 1670 - more heads than before: 1+added heads (2..n)
1670 1671 - fewer heads than before: -1-removed heads (-2..-n)
1671 1672 - number of heads stays the same: 1
1672 1673 """
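        # Illustrative reading of the return value (a sketch, not in the
        # original):
        #   ret = repo.addchangegroup(cg, 'pull', url)
        #   ret == 0 -> nothing changed; ret == 1 -> head count unchanged;
        #   ret > 1  -> ret - 1 new heads; ret < 0 -> -1 - ret heads removed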
1673 1674 def csmap(x):
1674 1675 self.ui.debug("add changeset %s\n" % short(x))
1675 1676 return len(cl)
1676 1677
1677 1678 def revmap(x):
1678 1679 return cl.rev(x)
1679 1680
1680 1681 if not source:
1681 1682 return 0
1682 1683
1683 1684 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684 1685
1685 1686 changesets = files = revisions = 0
1686 1687 efiles = set()
1687 1688
1688 1689 # write changelog data to temp files so concurrent readers will not see
1689 1690 # inconsistent view
1690 1691 cl = self.changelog
1691 1692 cl.delayupdate()
1692 1693 oldheads = len(cl.heads())
1693 1694
1694 1695 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 1696 try:
1696 1697 trp = weakref.proxy(tr)
1697 1698 # pull off the changeset group
1698 1699 self.ui.status(_("adding changesets\n"))
1699 1700 clstart = len(cl)
1700 1701 class prog(object):
1701 1702 step = _('changesets')
1702 1703 count = 1
1703 1704 ui = self.ui
1704 1705 total = None
1705 1706 def __call__(self):
1706 1707 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 1708 total=self.total)
1708 1709 self.count += 1
1709 1710 pr = prog()
1710 1711 source.callback = pr
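            # (the unbundler calls this back once per chunk it reads, which
            # is what advances the per-chunk progress display)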
1711 1712
1712 1713 if (cl.addgroup(source, csmap, trp) is None
1713 1714 and not emptyok):
1714 1715 raise util.Abort(_("received changelog group is empty"))
1715 1716 clend = len(cl)
1716 1717 changesets = clend - clstart
1717 1718 for c in xrange(clstart, clend):
1718 1719 efiles.update(self[c].files())
1719 1720 efiles = len(efiles)
1720 1721 self.ui.progress(_('changesets'), None)
1721 1722
1722 1723 # pull off the manifest group
1723 1724 self.ui.status(_("adding manifests\n"))
1724 1725 pr.step = _('manifests')
1725 1726 pr.count = 1
1726 1727 pr.total = changesets # manifests <= changesets
1727 1728 # no need to check for empty manifest group here:
1728 1729 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 1730 # no new manifest will be created and the manifest group will
1730 1731 # be empty during the pull
1731 1732 self.manifest.addgroup(source, revmap, trp)
1732 1733 self.ui.progress(_('manifests'), None)
1733 1734
1734 1735 needfiles = {}
1735 1736 if self.ui.configbool('server', 'validate', default=False):
1736 1737 # validate incoming csets have their manifests
1737 1738 for cset in xrange(clstart, clend):
1738 1739 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 1740 mfest = self.manifest.readdelta(mfest)
1740 1741 # store file nodes we must see
1741 1742 for f, n in mfest.iteritems():
1742 1743 needfiles.setdefault(f, set()).add(n)
1743 1744
1744 1745 # process the files
1745 1746 self.ui.status(_("adding file changes\n"))
1746 1747             pr.step = _('files')
1747 1748 pr.count = 1
1748 1749 pr.total = efiles
1749 1750 source.callback = None
1750 1751
1751 1752             while True:
1752 1753 f = source.chunk()
1753 1754 if not f:
1754 1755 break
1755 1756 self.ui.debug("adding %s revisions\n" % f)
1756 1757 pr()
1757 1758 fl = self.file(f)
1758 1759 o = len(fl)
1759 1760 if fl.addgroup(source, revmap, trp) is None:
1760 1761 raise util.Abort(_("received file revlog group is empty"))
1761 1762 revisions += len(fl) - o
1762 1763 files += 1
1763 1764 if f in needfiles:
1764 1765 needs = needfiles[f]
1765 1766 for new in xrange(o, len(fl)):
1766 1767 n = fl.node(new)
1767 1768 if n in needs:
1768 1769 needs.remove(n)
1769 1770 if not needs:
1770 1771 del needfiles[f]
1771 1772 self.ui.progress(_('files'), None)
1772 1773
1773 1774 for f, needs in needfiles.iteritems():
1774 1775 fl = self.file(f)
1775 1776 for n in needs:
1776 1777 try:
1777 1778 fl.rev(n)
1778 1779 except error.LookupError:
1779 1780 raise util.Abort(
1780 1781 _('missing file data for %s:%s - run hg verify') %
1781 1782 (f, hex(n)))
1782 1783
1783 1784 newheads = len(cl.heads())
1784 1785 heads = ""
1785 1786 if oldheads and newheads != oldheads:
1786 1787 heads = _(" (%+d heads)") % (newheads - oldheads)
1787 1788
1788 1789 self.ui.status(_("added %d changesets"
1789 1790 " with %d changes to %d files%s\n")
1790 1791 % (changesets, revisions, files, heads))
1791 1792
1792 1793 if changesets > 0:
1793 1794 p = lambda: cl.writepending() and self.root or ""
1794 1795 self.hook('pretxnchangegroup', throw=True,
1795 1796 node=hex(cl.node(clstart)), source=srctype,
1796 1797 url=url, pending=p)
1797 1798
1798 1799 # make changelog see real files again
1799 1800 cl.finalize(trp)
1800 1801
1801 1802 tr.close()
1802 1803 finally:
1803 1804 tr.release()
1804 1805 if lock:
1805 1806 lock.release()
1806 1807
1807 1808 if changesets > 0:
1808 1809 # forcefully update the on-disk branch cache
1809 1810 self.ui.debug("updating the branch cache\n")
1810 1811 self.updatebranchcache()
1811 1812 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 1813 source=srctype, url=url)
1813 1814
1814 1815 for i in xrange(clstart, clend):
1815 1816 self.hook("incoming", node=hex(cl.node(i)),
1816 1817 source=srctype, url=url)
1817 1818
1818 1819 # never return 0 here:
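        # (0 is reserved for "nothing changed or no source"; see docstring)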
1819 1820 if newheads < oldheads:
1820 1821 return newheads - oldheads - 1
1821 1822 else:
1822 1823 return newheads - oldheads + 1
1823 1824
1824 1825
1825 1826 def stream_in(self, remote, requirements):
1826 1827 lock = self.lock()
1827 1828 try:
1828 1829 fp = remote.stream_out()
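            # Wire format, as parsed below: one integer status line, then a
            # line "<total files> <total bytes>", then for each file a header
            # "<name>\0<size>" followed by exactly <size> bytes of store data.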
1829 1830 l = fp.readline()
1830 1831 try:
1831 1832 resp = int(l)
1832 1833 except ValueError:
1833 1834 raise error.ResponseError(
1834 1835 _('Unexpected response from remote server:'), l)
1835 1836 if resp == 1:
1836 1837 raise util.Abort(_('operation forbidden by server'))
1837 1838 elif resp == 2:
1838 1839 raise util.Abort(_('locking the remote repository failed'))
1839 1840 elif resp != 0:
1840 1841 raise util.Abort(_('the server sent an unknown error code'))
1841 1842 self.ui.status(_('streaming all changes\n'))
1842 1843 l = fp.readline()
1843 1844 try:
1844 1845 total_files, total_bytes = map(int, l.split(' ', 1))
1845 1846 except (ValueError, TypeError):
1846 1847 raise error.ResponseError(
1847 1848 _('Unexpected response from remote server:'), l)
1848 1849 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 1850 (total_files, util.bytecount(total_bytes)))
1850 1851 start = time.time()
1851 1852 for i in xrange(total_files):
1852 1853 # XXX doesn't support '\n' or '\r' in filenames
1853 1854 l = fp.readline()
1854 1855 try:
1855 1856 name, size = l.split('\0', 1)
1856 1857 size = int(size)
1857 1858 except (ValueError, TypeError):
1858 1859 raise error.ResponseError(
1859 1860 _('Unexpected response from remote server:'), l)
1860 1861 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 1862 # for backwards compat, name was partially encoded
1862 1863 ofp = self.sopener(store.decodedir(name), 'w')
1863 1864 for chunk in util.filechunkiter(fp, limit=size):
1864 1865 ofp.write(chunk)
1865 1866 ofp.close()
1866 1867 elapsed = time.time() - start
1867 1868 if elapsed <= 0:
1868 1869 elapsed = 0.001
1869 1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 1871 (util.bytecount(total_bytes), elapsed,
1871 1872 util.bytecount(total_bytes / elapsed)))
1872 1873
1873 1874 # new requirements = old non-format requirements + new format-related
1874 1875 # requirements from the streamed-in repository
1875 1876 requirements.update(set(self.requirements) - self.supportedformats)
1876 1877 self._applyrequirements(requirements)
1877 1878 self._writerequirements()
1878 1879
1879 1880 self.invalidate()
1880 1881 return len(self.heads()) + 1
1881 1882 finally:
1882 1883 lock.release()
1883 1884
1884 1885 def clone(self, remote, heads=[], stream=False):
1885 1886 '''clone remote repository.
1886 1887
1887 1888 keyword arguments:
1888 1889 heads: list of revs to clone (forces use of pull)
1889 1890 stream: use streaming clone if possible'''
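        # Illustrative only: callers get the streaming/pull fallback for
        # free, e.g. repo.clone(remote, stream=True) streams when the server
        # advertises a compatible format and otherwise pulls.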
1890 1891
1891 1892 # now, all clients that can request uncompressed clones can
1892 1893 # read repo formats supported by all servers that can serve
1893 1894 # them.
1894 1895
1895 1896 # if revlog format changes, client will have to check version
1896 1897 # and format flags on "stream" capability, and use
1897 1898 # uncompressed only if compatible.
1898 1899
1899 1900 if stream and not heads:
1900 1901 # 'stream' means remote revlog format is revlogv1 only
1901 1902 if remote.capable('stream'):
1902 1903 return self.stream_in(remote, set(('revlogv1',)))
1903 1904 # otherwise, 'streamreqs' contains the remote revlog format
1904 1905 streamreqs = remote.capable('streamreqs')
1905 1906 if streamreqs:
1906 1907 streamreqs = set(streamreqs.split(','))
1907 1908 # if we support it, stream in and adjust our requirements
1908 1909 if not streamreqs - self.supportedformats:
1909 1910 return self.stream_in(remote, streamreqs)
1910 1911 return self.pull(remote, heads)
1911 1912
1912 1913 def pushkey(self, namespace, key, old, new):
1913 1914 return pushkey.push(self, namespace, key, old, new)
1914 1915
1915 1916 def listkeys(self, namespace):
1916 1917 return pushkey.list(self, namespace)
1917 1918
1918 1919 def debugwireargs(self, one, two, three=None, four=None):
1919 1920 '''used to test argument passing over the wire'''
1920 1921 return "%s %s %s %s" % (one, two, three, four)
1921 1922
1922 1923 # used to avoid circular references so destructors work
1923 1924 def aftertrans(files):
1924 1925 renamefiles = [tuple(t) for t in files]
1925 1926 def a():
1926 1927 for src, dest in renamefiles:
1927 1928 util.rename(src, dest)
1928 1929 return a
1929 1930
1930 1931 def instance(ui, path, create):
1931 1932 return localrepository(ui, urlmod.localpath(path), create)
1932 1933
1933 1934 def islocal(path):
1934 1935 return True
@@ -1,166 +1,175 b''
1 1 $ echo "[extensions]" >> $HGRCPATH
2 2 $ echo "mq=" >> $HGRCPATH
3 3 $ echo "[mq]" >> $HGRCPATH
4 4 $ echo "git=keep" >> $HGRCPATH
5 5
6 6 $ hg init a
7 7 $ cd a
8 8
9 9 $ echo 'base' > base
10 10 $ hg ci -Ambase
11 11 adding base
12 12
13 13 $ hg qnew -mmqbase mqbase
14 14
15 15 $ echo 'patched' > base
16 16 $ hg qrefresh
17 17
18 18 qdiff:
19 19
20 20 $ hg qdiff
21 21 diff -r d20a80d4def3 base
22 22 --- a/base Thu Jan 01 00:00:00 1970 +0000
23 23 +++ b/base* (glob)
24 24 @@ -1,1 +1,1 @@
25 25 -base
26 26 +patched
27 27
28 28 qdiff dirname:
29 29
30 30 $ hg qdiff --nodates .
31 31 diff -r d20a80d4def3 base
32 32 --- a/base
33 33 +++ b/base
34 34 @@ -1,1 +1,1 @@
35 35 -base
36 36 +patched
37 37
38 38 qdiff filename:
39 39
40 40 $ hg qdiff --nodates base
41 41 diff -r d20a80d4def3 base
42 42 --- a/base
43 43 +++ b/base
44 44 @@ -1,1 +1,1 @@
45 45 -base
46 46 +patched
47 47
48 48 $ hg revert -a
49 49
50 50 $ hg qpop
51 51 popping mqbase
52 52 patch queue now empty
53 53
54 54 $ hg qdelete mqbase
55 55
56 56 $ printf '1\n2\n3\n4\nhello world\ngoodbye world\n7\n8\n9\n' > lines
57 57 $ hg ci -Amlines -d '2 0'
58 58 adding lines
59 59
60 60 $ hg qnew -mmqbase2 mqbase2
61 61 $ printf '\n\n1\n2\n3\n4\nhello world\n goodbye world\n7\n8\n9\n' > lines
62 62
63 63 $ hg qdiff --nodates -U 1
64 64 diff -r b0c220e1cf43 lines
65 65 --- a/lines
66 66 +++ b/lines
67 67 @@ -1,1 +1,3 @@
68 68 +
69 69 +
70 70 1
71 71 @@ -4,4 +6,4 @@
72 72 4
73 73 -hello world
74 74 -goodbye world
75 75 +hello world
76 76 + goodbye world
77 77 7
78 78
79 79 $ hg qdiff --nodates -b
80 80 diff -r b0c220e1cf43 lines
81 81 --- a/lines
82 82 +++ b/lines
83 83 @@ -1,9 +1,11 @@
84 84 +
85 85 +
86 86 1
87 87 2
88 88 3
89 89 4
90 90 hello world
91 91 -goodbye world
92 92 + goodbye world
93 93 7
94 94 8
95 95 9
96 96
97 97 $ hg qdiff --nodates -U 1 -B
98 98 diff -r b0c220e1cf43 lines
99 99 --- a/lines
100 100 +++ b/lines
101 101 @@ -4,4 +6,4 @@
102 102 4
103 103 -hello world
104 104 -goodbye world
105 105 +hello world
106 106 + goodbye world
107 107 7
108 108
109 109 $ hg qdiff --nodates -w
110 110 diff -r b0c220e1cf43 lines
111 111 --- a/lines
112 112 +++ b/lines
113 113 @@ -1,3 +1,5 @@
114 114 +
115 115 +
116 116 1
117 117 2
118 118 3
119 119
120 120 $ hg qdiff --nodates --reverse
121 121 diff -r b0c220e1cf43 lines
122 122 --- a/lines
123 123 +++ b/lines
124 124 @@ -1,11 +1,9 @@
125 125 -
126 126 -
127 127 1
128 128 2
129 129 3
130 130 4
131 131 -hello world
132 132 - goodbye world
133 133 +hello world
134 134 +goodbye world
135 135 7
136 136 8
137 137 9
138 138
139 139 qdiff preserve existing git flag:
140 140
141 141 $ hg qrefresh --git
142 142 $ echo a >> lines
143 143 $ hg qdiff
144 144 diff --git a/lines b/lines
145 145 --- a/lines
146 146 +++ b/lines
147 147 @@ -1,9 +1,12 @@
148 148 +
149 149 +
150 150 1
151 151 2
152 152 3
153 153 4
154 154 -hello world
155 155 -goodbye world
156 156 +hello world
157 157 + goodbye world
158 158 7
159 159 8
160 160 9
161 161 +a
162 162
163 163 $ hg qdiff --stat
164 164 lines | 7 +++++--
165 165 1 files changed, 5 insertions(+), 2 deletions(-)
166 $ hg qrefresh
166 167
168 qdiff when file deleted (but not removed) in working dir:
169
170 $ hg qnew deleted-file
171 $ echo a > newfile
172 $ hg add newfile
173 $ hg qrefresh
174 $ rm newfile
175 $ hg qdiff