dirstate: add filecache support
Idan Kamara
r16200:9d4a2942 stable
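In brief: this changeset gives each dirstate instance a _filecache dict so
the scmutil.filecache machinery can register the properties it caches
against the files that back them, and (in the localrepo hunk below) teaches
invalidatedirstate() to drop exactly those cached names. A minimal sketch
of the pattern follows; it is not Mercurial's actual scmutil.filecache, and
the decorator body and the '.hg/dirstate' path are illustrative only:

    import os

    class filecache(object):
        # Cache the decorated method's result under its own name and
        # record an entry in the owner's _filecache dict, so that
        # invalidation code can enumerate and delattr the cached names.
        def __init__(self, path):
            self.path = path
        def __call__(self, func):
            self.func = func
            self.name = func.__name__
            return self
        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            # Non-data descriptor: storing into the instance dict means
            # later reads bypass __get__ until the name is delattr'd.
            obj.__dict__[self.name] = value
            try:
                obj._filecache[self.name] = os.stat(self.path)
            except OSError:
                obj._filecache[self.name] = None
            return value

    class dirstate(object):
        def __init__(self):
            self._filecache = {}       # the dict this changeset adds

        @filecache('.hg/dirstate')     # illustrative backing file
        def _map(self):
            return {}                  # stand-in for the parsed dirstate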
@@ -1,736 +1,737 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding
12 12 import struct, os, stat, errno
13 13 import cStringIO
14 14
15 15 _format = ">cllll"
16 16 propertycache = util.propertycache
17 17
18 18 def _finddirs(path):
19 19 pos = path.rfind('/')
20 20 while pos != -1:
21 21 yield path[:pos]
22 22 pos = path.rfind('/', 0, pos)
23 23
24 24 def _incdirs(dirs, path):
25 25 for base in _finddirs(path):
26 26 if base in dirs:
27 27 dirs[base] += 1
28 28 return
29 29 dirs[base] = 1
30 30
31 31 def _decdirs(dirs, path):
32 32 for base in _finddirs(path):
33 33 if dirs[base] > 1:
34 34 dirs[base] -= 1
35 35 return
36 36 del dirs[base]
37 37
38 38 class dirstate(object):
39 39
40 40 def __init__(self, opener, ui, root, validate):
41 41 '''Create a new dirstate object.
42 42
43 43 opener is an open()-like callable that can be used to open the
44 44 dirstate file; root is the root of the directory tracked by
45 45 the dirstate.
46 46 '''
47 47 self._opener = opener
48 48 self._validate = validate
49 49 self._root = root
50 50 self._rootdir = os.path.join(root, '')
51 51 self._dirty = False
52 52 self._dirtypl = False
53 53 self._lastnormaltime = 0
54 54 self._ui = ui
55 self._filecache = {}
55 56
56 57 @propertycache
57 58 def _map(self):
58 59 '''Return the dirstate contents as a map from filename to
59 60 (state, mode, size, time).'''
60 61 self._read()
61 62 return self._map
62 63
63 64 @propertycache
64 65 def _copymap(self):
65 66 self._read()
66 67 return self._copymap
67 68
68 69 @propertycache
69 70 def _normroot(self):
70 71 return util.normcase(self._root)
71 72
72 73 @propertycache
73 74 def _foldmap(self):
74 75 f = {}
75 76 for name in self._map:
76 77 f[util.normcase(name)] = name
77 78 f['.'] = '.' # prevents useless util.fspath() invocation
78 79 return f
79 80
80 81 @propertycache
81 82 def _branch(self):
82 83 try:
83 84 return self._opener.read("branch").strip() or "default"
84 85 except IOError, inst:
85 86 if inst.errno != errno.ENOENT:
86 87 raise
87 88 return "default"
88 89
89 90 @propertycache
90 91 def _pl(self):
91 92 try:
92 93 fp = self._opener("dirstate")
93 94 st = fp.read(40)
94 95 fp.close()
95 96 l = len(st)
96 97 if l == 40:
97 98 return st[:20], st[20:40]
98 99 elif l > 0 and l < 40:
99 100 raise util.Abort(_('working directory state appears damaged!'))
100 101 except IOError, err:
101 102 if err.errno != errno.ENOENT:
102 103 raise
103 104 return [nullid, nullid]
104 105
105 106 @propertycache
106 107 def _dirs(self):
107 108 dirs = {}
108 109 for f, s in self._map.iteritems():
109 110 if s[0] != 'r':
110 111 _incdirs(dirs, f)
111 112 return dirs
112 113
113 114 def dirs(self):
114 115 return self._dirs
115 116
116 117 @propertycache
117 118 def _ignore(self):
118 119 files = [self._join('.hgignore')]
119 120 for name, path in self._ui.configitems("ui"):
120 121 if name == 'ignore' or name.startswith('ignore.'):
121 122 files.append(util.expandpath(path))
122 123 return ignore.ignore(self._root, files, self._ui.warn)
123 124
124 125 @propertycache
125 126 def _slash(self):
126 127 return self._ui.configbool('ui', 'slash') and os.sep != '/'
127 128
128 129 @propertycache
129 130 def _checklink(self):
130 131 return util.checklink(self._root)
131 132
132 133 @propertycache
133 134 def _checkexec(self):
134 135 return util.checkexec(self._root)
135 136
136 137 @propertycache
137 138 def _checkcase(self):
138 139 return not util.checkcase(self._join('.hg'))
139 140
140 141 def _join(self, f):
141 142 # much faster than os.path.join()
142 143 # it's safe because f is always a relative path
143 144 return self._rootdir + f
144 145
145 146 def flagfunc(self, buildfallback):
146 147 if self._checklink and self._checkexec:
147 148 def f(x):
148 149 p = self._join(x)
149 150 if os.path.islink(p):
150 151 return 'l'
151 152 if util.isexec(p):
152 153 return 'x'
153 154 return ''
154 155 return f
155 156
156 157 fallback = buildfallback()
157 158 if self._checklink:
158 159 def f(x):
159 160 if os.path.islink(self._join(x)):
160 161 return 'l'
161 162 if 'x' in fallback(x):
162 163 return 'x'
163 164 return ''
164 165 return f
165 166 if self._checkexec:
166 167 def f(x):
167 168 if 'l' in fallback(x):
168 169 return 'l'
169 170 if util.isexec(self._join(x)):
170 171 return 'x'
171 172 return ''
172 173 return f
173 174 else:
174 175 return fallback
175 176
176 177 def getcwd(self):
177 178 cwd = os.getcwd()
178 179 if cwd == self._root:
179 180 return ''
180 181 # self._root ends with a path separator if self._root is '/' or 'C:\'
181 182 rootsep = self._root
182 183 if not util.endswithsep(rootsep):
183 184 rootsep += os.sep
184 185 if cwd.startswith(rootsep):
185 186 return cwd[len(rootsep):]
186 187 else:
187 188 # we're outside the repo. return an absolute path.
188 189 return cwd
189 190
190 191 def pathto(self, f, cwd=None):
191 192 if cwd is None:
192 193 cwd = self.getcwd()
193 194 path = util.pathto(self._root, cwd, f)
194 195 if self._slash:
195 196 return util.normpath(path)
196 197 return path
197 198
198 199 def __getitem__(self, key):
199 200 '''Return the current state of key (a filename) in the dirstate.
200 201
201 202 States are:
202 203 n normal
203 204 m needs merging
204 205 r marked for removal
205 206 a marked for addition
206 207 ? not tracked
207 208 '''
208 209 return self._map.get(key, ("?",))[0]
209 210
210 211 def __contains__(self, key):
211 212 return key in self._map
212 213
213 214 def __iter__(self):
214 215 for x in sorted(self._map):
215 216 yield x
216 217
217 218 def parents(self):
218 219 return [self._validate(p) for p in self._pl]
219 220
220 221 def p1(self):
221 222 return self._validate(self._pl[0])
222 223
223 224 def p2(self):
224 225 return self._validate(self._pl[1])
225 226
226 227 def branch(self):
227 228 return encoding.tolocal(self._branch)
228 229
229 230 def setparents(self, p1, p2=nullid):
230 231 self._dirty = self._dirtypl = True
231 232 self._pl = p1, p2
232 233
233 234 def setbranch(self, branch):
234 235 if branch in ['tip', '.', 'null']:
235 236 raise util.Abort(_('the name \'%s\' is reserved') % branch)
236 237 self._branch = encoding.fromlocal(branch)
237 238 self._opener.write("branch", self._branch + '\n')
238 239
239 240 def _read(self):
240 241 self._map = {}
241 242 self._copymap = {}
242 243 try:
243 244 st = self._opener.read("dirstate")
244 245 except IOError, err:
245 246 if err.errno != errno.ENOENT:
246 247 raise
247 248 return
248 249 if not st:
249 250 return
250 251
251 252 p = parsers.parse_dirstate(self._map, self._copymap, st)
252 253 if not self._dirtypl:
253 254 self._pl = p
254 255
255 256 def invalidate(self):
256 257 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
257 258 "_ignore"):
258 259 if a in self.__dict__:
259 260 delattr(self, a)
260 261 self._lastnormaltime = 0
261 262 self._dirty = False
262 263
263 264 def copy(self, source, dest):
264 265 """Mark dest as a copy of source. Unmark dest if source is None."""
265 266 if source == dest:
266 267 return
267 268 self._dirty = True
268 269 if source is not None:
269 270 self._copymap[dest] = source
270 271 elif dest in self._copymap:
271 272 del self._copymap[dest]
272 273
273 274 def copied(self, file):
274 275 return self._copymap.get(file, None)
275 276
276 277 def copies(self):
277 278 return self._copymap
278 279
279 280 def _droppath(self, f):
280 281 if self[f] not in "?r" and "_dirs" in self.__dict__:
281 282 _decdirs(self._dirs, f)
282 283
283 284 def _addpath(self, f, check=False):
284 285 oldstate = self[f]
285 286 if check or oldstate == "r":
286 287 scmutil.checkfilename(f)
287 288 if f in self._dirs:
288 289 raise util.Abort(_('directory %r already in dirstate') % f)
289 290 # shadows
290 291 for d in _finddirs(f):
291 292 if d in self._dirs:
292 293 break
293 294 if d in self._map and self[d] != 'r':
294 295 raise util.Abort(
295 296 _('file %r in dirstate clashes with %r') % (d, f))
296 297 if oldstate in "?r" and "_dirs" in self.__dict__:
297 298 _incdirs(self._dirs, f)
298 299
299 300 def normal(self, f):
300 301 '''Mark a file normal and clean.'''
301 302 self._dirty = True
302 303 self._addpath(f)
303 304 s = os.lstat(self._join(f))
304 305 mtime = int(s.st_mtime)
305 306 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
306 307 if f in self._copymap:
307 308 del self._copymap[f]
308 309 if mtime > self._lastnormaltime:
309 310 # Remember the most recent modification timeslot for status(),
310 311 # to make sure we won't miss future size-preserving file content
311 312 # modifications that happen within the same timeslot.
312 313 self._lastnormaltime = mtime
313 314
314 315 def normallookup(self, f):
315 316 '''Mark a file normal, but possibly dirty.'''
316 317 if self._pl[1] != nullid and f in self._map:
317 318 # if there is a merge going on and the file was either
318 319 # in state 'm' (-1) or coming from other parent (-2) before
319 320 # being removed, restore that state.
320 321 entry = self._map[f]
321 322 if entry[0] == 'r' and entry[2] in (-1, -2):
322 323 source = self._copymap.get(f)
323 324 if entry[2] == -1:
324 325 self.merge(f)
325 326 elif entry[2] == -2:
326 327 self.otherparent(f)
327 328 if source:
328 329 self.copy(source, f)
329 330 return
330 331 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
331 332 return
332 333 self._dirty = True
333 334 self._addpath(f)
334 335 self._map[f] = ('n', 0, -1, -1)
335 336 if f in self._copymap:
336 337 del self._copymap[f]
337 338
338 339 def otherparent(self, f):
339 340 '''Mark as coming from the other parent, always dirty.'''
340 341 if self._pl[1] == nullid:
341 342 raise util.Abort(_("setting %r to other parent "
342 343 "only allowed in merges") % f)
343 344 self._dirty = True
344 345 self._addpath(f)
345 346 self._map[f] = ('n', 0, -2, -1)
346 347 if f in self._copymap:
347 348 del self._copymap[f]
348 349
349 350 def add(self, f):
350 351 '''Mark a file added.'''
351 352 self._dirty = True
352 353 self._addpath(f, True)
353 354 self._map[f] = ('a', 0, -1, -1)
354 355 if f in self._copymap:
355 356 del self._copymap[f]
356 357
357 358 def remove(self, f):
358 359 '''Mark a file removed.'''
359 360 self._dirty = True
360 361 self._droppath(f)
361 362 size = 0
362 363 if self._pl[1] != nullid and f in self._map:
363 364 # backup the previous state
364 365 entry = self._map[f]
365 366 if entry[0] == 'm': # merge
366 367 size = -1
367 368 elif entry[0] == 'n' and entry[2] == -2: # other parent
368 369 size = -2
369 370 self._map[f] = ('r', 0, size, 0)
370 371 if size == 0 and f in self._copymap:
371 372 del self._copymap[f]
372 373
373 374 def merge(self, f):
374 375 '''Mark a file merged.'''
375 376 self._dirty = True
376 377 s = os.lstat(self._join(f))
377 378 self._addpath(f)
378 379 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
379 380 if f in self._copymap:
380 381 del self._copymap[f]
381 382
382 383 def drop(self, f):
383 384 '''Drop a file from the dirstate'''
384 385 if f in self._map:
385 386 self._dirty = True
386 387 self._droppath(f)
387 388 del self._map[f]
388 389
389 390 def _normalize(self, path, isknown):
390 391 normed = util.normcase(path)
391 392 folded = self._foldmap.get(normed, None)
392 393 if folded is None:
393 394 if isknown or not os.path.lexists(os.path.join(self._root, path)):
394 395 folded = path
395 396 else:
396 397 folded = self._foldmap.setdefault(normed,
397 398 util.fspath(normed, self._normroot))
398 399 return folded
399 400
400 401 def normalize(self, path, isknown=False):
401 402 '''
402 403 normalize the case of a pathname when on a casefolding filesystem
403 404
404 405 isknown specifies whether the filename came from walking the
405 406 disk, to avoid extra filesystem access
406 407
407 408 The normalized case is determined based on the following precedence:
408 409
409 410 - version of name already stored in the dirstate
410 411 - version of name stored on disk
411 412 - version provided via command arguments
412 413 '''
413 414
414 415 if self._checkcase:
415 416 return self._normalize(path, isknown)
416 417 return path
417 418
418 419 def clear(self):
419 420 self._map = {}
420 421 if "_dirs" in self.__dict__:
421 422 delattr(self, "_dirs")
422 423 self._copymap = {}
423 424 self._pl = [nullid, nullid]
424 425 self._lastnormaltime = 0
425 426 self._dirty = True
426 427
427 428 def rebuild(self, parent, files):
428 429 self.clear()
429 430 for f in files:
430 431 if 'x' in files.flags(f):
431 432 self._map[f] = ('n', 0777, -1, 0)
432 433 else:
433 434 self._map[f] = ('n', 0666, -1, 0)
434 435 self._pl = (parent, nullid)
435 436 self._dirty = True
436 437
437 438 def write(self):
438 439 if not self._dirty:
439 440 return
440 441 st = self._opener("dirstate", "w", atomictemp=True)
441 442
442 443 # use the modification time of the newly created temporary file as the
443 444 # filesystem's notion of 'now'
444 445 now = int(util.fstat(st).st_mtime)
445 446
446 447 cs = cStringIO.StringIO()
447 448 copymap = self._copymap
448 449 pack = struct.pack
449 450 write = cs.write
450 451 write("".join(self._pl))
451 452 for f, e in self._map.iteritems():
452 453 if e[0] == 'n' and e[3] == now:
453 454 # The file was last modified "simultaneously" with the current
454 455 # write to dirstate (i.e. within the same second for file-
455 456 # systems with a granularity of 1 sec). This commonly happens
456 457 # for at least a couple of files on 'update'.
457 458 # The user could change the file without changing its size
458 459 # within the same second. Invalidate the file's stat data in
459 460 # dirstate, forcing future 'status' calls to compare the
460 461 # contents of the file. This prevents mistakenly treating such
461 462 # files as clean.
462 463 e = (e[0], 0, -1, -1) # mark entry as 'unset'
463 464 self._map[f] = e
464 465
465 466 if f in copymap:
466 467 f = "%s\0%s" % (f, copymap[f])
467 468 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
468 469 write(e)
469 470 write(f)
470 471 st.write(cs.getvalue())
471 472 st.close()
472 473 self._lastnormaltime = 0
473 474 self._dirty = self._dirtypl = False
474 475
475 476 def _dirignore(self, f):
476 477 if f == '.':
477 478 return False
478 479 if self._ignore(f):
479 480 return True
480 481 for p in _finddirs(f):
481 482 if self._ignore(p):
482 483 return True
483 484 return False
484 485
485 486 def walk(self, match, subrepos, unknown, ignored):
486 487 '''
487 488 Walk recursively through the directory tree, finding all files
488 489 matched by match.
489 490
490 491 Return a dict mapping filename to stat-like object (either
491 492 mercurial.osutil.stat instance or return value of os.stat()).
492 493 '''
493 494
494 495 def fwarn(f, msg):
495 496 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
496 497 return False
497 498
498 499 def badtype(mode):
499 500 kind = _('unknown')
500 501 if stat.S_ISCHR(mode):
501 502 kind = _('character device')
502 503 elif stat.S_ISBLK(mode):
503 504 kind = _('block device')
504 505 elif stat.S_ISFIFO(mode):
505 506 kind = _('fifo')
506 507 elif stat.S_ISSOCK(mode):
507 508 kind = _('socket')
508 509 elif stat.S_ISDIR(mode):
509 510 kind = _('directory')
510 511 return _('unsupported file type (type is %s)') % kind
511 512
512 513 ignore = self._ignore
513 514 dirignore = self._dirignore
514 515 if ignored:
515 516 ignore = util.never
516 517 dirignore = util.never
517 518 elif not unknown:
518 519 # if unknown and ignored are False, skip step 2
519 520 ignore = util.always
520 521 dirignore = util.always
521 522
522 523 matchfn = match.matchfn
523 524 badfn = match.bad
524 525 dmap = self._map
525 526 normpath = util.normpath
526 527 listdir = osutil.listdir
527 528 lstat = os.lstat
528 529 getkind = stat.S_IFMT
529 530 dirkind = stat.S_IFDIR
530 531 regkind = stat.S_IFREG
531 532 lnkkind = stat.S_IFLNK
532 533 join = self._join
533 534 work = []
534 535 wadd = work.append
535 536
536 537 exact = skipstep3 = False
537 538 if matchfn == match.exact: # match.exact
538 539 exact = True
539 540 dirignore = util.always # skip step 2
540 541 elif match.files() and not match.anypats(): # match.match, no patterns
541 542 skipstep3 = True
542 543
543 544 if self._checkcase:
544 545 normalize = self._normalize
545 546 skipstep3 = False
546 547 else:
547 548 normalize = lambda x, y: x
548 549
549 550 files = sorted(match.files())
550 551 subrepos.sort()
551 552 i, j = 0, 0
552 553 while i < len(files) and j < len(subrepos):
553 554 subpath = subrepos[j] + "/"
554 555 if files[i] < subpath:
555 556 i += 1
556 557 continue
557 558 while i < len(files) and files[i].startswith(subpath):
558 559 del files[i]
559 560 j += 1
560 561
561 562 if not files or '.' in files:
562 563 files = ['']
563 564 results = dict.fromkeys(subrepos)
564 565 results['.hg'] = None
565 566
566 567 # step 1: find all explicit files
567 568 for ff in files:
568 569 nf = normalize(normpath(ff), False)
569 570 if nf in results:
570 571 continue
571 572
572 573 try:
573 574 st = lstat(join(nf))
574 575 kind = getkind(st.st_mode)
575 576 if kind == dirkind:
576 577 skipstep3 = False
577 578 if nf in dmap:
578 579 # file deleted on disk but still in dirstate
579 580 results[nf] = None
580 581 match.dir(nf)
581 582 if not dirignore(nf):
582 583 wadd(nf)
583 584 elif kind == regkind or kind == lnkkind:
584 585 results[nf] = st
585 586 else:
586 587 badfn(ff, badtype(kind))
587 588 if nf in dmap:
588 589 results[nf] = None
589 590 except OSError, inst:
590 591 if nf in dmap: # does it exactly match a file?
591 592 results[nf] = None
592 593 else: # does it match a directory?
593 594 prefix = nf + "/"
594 595 for fn in dmap:
595 596 if fn.startswith(prefix):
596 597 match.dir(nf)
597 598 skipstep3 = False
598 599 break
599 600 else:
600 601 badfn(ff, inst.strerror)
601 602
602 603 # step 2: visit subdirectories
603 604 while work:
604 605 nd = work.pop()
605 606 skip = None
606 607 if nd == '.':
607 608 nd = ''
608 609 else:
609 610 skip = '.hg'
610 611 try:
611 612 entries = listdir(join(nd), stat=True, skip=skip)
612 613 except OSError, inst:
613 614 if inst.errno == errno.EACCES:
614 615 fwarn(nd, inst.strerror)
615 616 continue
616 617 raise
617 618 for f, kind, st in entries:
618 619 nf = normalize(nd and (nd + "/" + f) or f, True)
619 620 if nf not in results:
620 621 if kind == dirkind:
621 622 if not ignore(nf):
622 623 match.dir(nf)
623 624 wadd(nf)
624 625 if nf in dmap and matchfn(nf):
625 626 results[nf] = None
626 627 elif kind == regkind or kind == lnkkind:
627 628 if nf in dmap:
628 629 if matchfn(nf):
629 630 results[nf] = st
630 631 elif matchfn(nf) and not ignore(nf):
631 632 results[nf] = st
632 633 elif nf in dmap and matchfn(nf):
633 634 results[nf] = None
634 635
635 636 # step 3: report unseen items in the dmap hash
636 637 if not skipstep3 and not exact:
637 638 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
638 639 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
639 640 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
640 641 st = None
641 642 results[nf] = st
642 643 for s in subrepos:
643 644 del results[s]
644 645 del results['.hg']
645 646 return results
646 647
647 648 def status(self, match, subrepos, ignored, clean, unknown):
648 649 '''Determine the status of the working copy relative to the
649 650 dirstate and return a tuple of lists (unsure, modified, added,
650 651 removed, deleted, unknown, ignored, clean), where:
651 652
652 653 unsure:
653 654 files that might have been modified since the dirstate was
654 655 written, but need to be read to be sure (size is the same
655 656 but mtime differs)
656 657 modified:
657 658 files that have definitely been modified since the dirstate
658 659 was written (different size or mode)
659 660 added:
660 661 files that have been explicitly added with hg add
661 662 removed:
662 663 files that have been explicitly removed with hg remove
663 664 deleted:
664 665 files that have been deleted through other means ("missing")
665 666 unknown:
666 667 files not in the dirstate that are not ignored
667 668 ignored:
668 669 files not in the dirstate that are ignored
669 670 (by _dirignore())
670 671 clean:
671 672 files that have definitely not been modified since the
672 673 dirstate was written
673 674 '''
674 675 listignored, listclean, listunknown = ignored, clean, unknown
675 676 lookup, modified, added, unknown, ignored = [], [], [], [], []
676 677 removed, deleted, clean = [], [], []
677 678
678 679 dmap = self._map
679 680 ladd = lookup.append # aka "unsure"
680 681 madd = modified.append
681 682 aadd = added.append
682 683 uadd = unknown.append
683 684 iadd = ignored.append
684 685 radd = removed.append
685 686 dadd = deleted.append
686 687 cadd = clean.append
687 688
688 689 lnkkind = stat.S_IFLNK
689 690
690 691 for fn, st in self.walk(match, subrepos, listunknown,
691 692 listignored).iteritems():
692 693 if fn not in dmap:
693 694 if (listignored or match.exact(fn)) and self._dirignore(fn):
694 695 if listignored:
695 696 iadd(fn)
696 697 elif listunknown:
697 698 uadd(fn)
698 699 continue
699 700
700 701 state, mode, size, time = dmap[fn]
701 702
702 703 if not st and state in "nma":
703 704 dadd(fn)
704 705 elif state == 'n':
705 706 # The "mode & lnkkind != lnkkind or self._checklink"
706 707 # lines are an expansion of "islink => checklink"
707 708 # where islink means "is this a link?" and checklink
708 709 # means "can we check links?".
709 710 mtime = int(st.st_mtime)
710 711 if (size >= 0 and
711 712 (size != st.st_size
712 713 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
713 714 and (mode & lnkkind != lnkkind or self._checklink)
714 715 or size == -2 # other parent
715 716 or fn in self._copymap):
716 717 madd(fn)
717 718 elif (mtime != time
718 719 and (mode & lnkkind != lnkkind or self._checklink)):
719 720 ladd(fn)
720 721 elif mtime == self._lastnormaltime:
721 722 # fn may have been changed in the same timeslot without
722 723 # changing its size. This can happen if we quickly do
723 724 # multiple commits in a single transaction.
724 725 # Force lookup, so we don't miss such a racy file change.
725 726 ladd(fn)
726 727 elif listclean:
727 728 cadd(fn)
728 729 elif state == 'm':
729 730 madd(fn)
730 731 elif state == 'a':
731 732 aadd(fn)
732 733 elif state == 'r':
733 734 radd(fn)
734 735
735 736 return (lookup, modified, added, removed, deleted, unknown, ignored,
736 737 clean)
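An aside on the helpers at the top of this file (unchanged by this patch):
_finddirs yields every ancestor directory of a path, and _incdirs keeps a
per-directory reference count so _addpath can cheaply detect file/directory
clashes. A standalone run, with the two helpers copied verbatim from the
listing above:

    def _finddirs(path):
        pos = path.rfind('/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind('/', 0, pos)

    def _incdirs(dirs, path):
        for base in _finddirs(path):
            if base in dirs:
                # once an ancestor is already tracked, bump only its count
                # and stop; its own parents were counted when it was added
                dirs[base] += 1
                return
            dirs[base] = 1

    dirs = {}
    for f in ('a/b/c.txt', 'a/b/d.txt', 'a/e.txt'):
        _incdirs(dirs, f)
    print dirs    # {'a': 2, 'a/b': 2}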
@@ -1,2321 +1,2324 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 self._dirtyphases = False
45 45 # A list of callbacks to shape the phases if no data were found.
46 46 # Callbacks are in the form: func(repo, roots) --> processed root.
47 47 # This list is to be filled by extensions during repo setup.
48 48 self._phasedefaults = []
49 49
50 50 try:
51 51 self.ui.readconfig(self.join("hgrc"), self.root)
52 52 extensions.loadall(self.ui)
53 53 except IOError:
54 54 pass
55 55
56 56 if not os.path.isdir(self.path):
57 57 if create:
58 58 if not os.path.exists(path):
59 59 util.makedirs(path)
60 60 util.makedir(self.path, notindexed=True)
61 61 requirements = ["revlogv1"]
62 62 if self.ui.configbool('format', 'usestore', True):
63 63 os.mkdir(os.path.join(self.path, "store"))
64 64 requirements.append("store")
65 65 if self.ui.configbool('format', 'usefncache', True):
66 66 requirements.append("fncache")
67 67 if self.ui.configbool('format', 'dotencode', True):
68 68 requirements.append('dotencode')
69 69 # create an invalid changelog
70 70 self.opener.append(
71 71 "00changelog.i",
72 72 '\0\0\0\2' # represents revlogv2
73 73 ' dummy changelog to prevent using the old repo layout'
74 74 )
75 75 if self.ui.configbool('format', 'generaldelta', False):
76 76 requirements.append("generaldelta")
77 77 requirements = set(requirements)
78 78 else:
79 79 raise error.RepoError(_("repository %s not found") % path)
80 80 elif create:
81 81 raise error.RepoError(_("repository %s already exists") % path)
82 82 else:
83 83 try:
84 84 requirements = scmutil.readrequires(self.opener, self.supported)
85 85 except IOError, inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 requirements = set()
89 89
90 90 self.sharedpath = self.path
91 91 try:
92 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 93 if not os.path.exists(s):
94 94 raise error.RepoError(
95 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 96 self.sharedpath = s
97 97 except IOError, inst:
98 98 if inst.errno != errno.ENOENT:
99 99 raise
100 100
101 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 102 self.spath = self.store.path
103 103 self.sopener = self.store.opener
104 104 self.sjoin = self.store.join
105 105 self.opener.createmode = self.store.createmode
106 106 self._applyrequirements(requirements)
107 107 if create:
108 108 self._writerequirements()
109 109
110 110
111 111 self._branchcache = None
112 112 self._branchcachetip = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 # A cache for various files under .hg/ that tracks file changes,
118 118 # (used by the filecache decorator)
119 119 #
120 120 # Maps a property name to its util.filecacheentry
121 121 self._filecache = {}
122 122
123 123 def _applyrequirements(self, requirements):
124 124 self.requirements = requirements
125 125 openerreqs = set(('revlogv1', 'generaldelta'))
126 126 self.sopener.options = dict((r, 1) for r in requirements
127 127 if r in openerreqs)
128 128
129 129 def _writerequirements(self):
130 130 reqfile = self.opener("requires", "w")
131 131 for r in self.requirements:
132 132 reqfile.write("%s\n" % r)
133 133 reqfile.close()
134 134
135 135 def _checknested(self, path):
136 136 """Determine if path is a legal nested repository."""
137 137 if not path.startswith(self.root):
138 138 return False
139 139 subpath = path[len(self.root) + 1:]
140 140 normsubpath = util.pconvert(subpath)
141 141
142 142 # XXX: Checking against the current working copy is wrong in
143 143 # the sense that it can reject things like
144 144 #
145 145 # $ hg cat -r 10 sub/x.txt
146 146 #
147 147 # if sub/ is no longer a subrepository in the working copy
148 148 # parent revision.
149 149 #
150 150 # However, it can of course also allow things that would have
151 151 # been rejected before, such as the above cat command if sub/
152 152 # is a subrepository now, but was a normal directory before.
153 153 # The old path auditor would have rejected it by mistake since it
154 154 # panics when it sees sub/.hg/.
155 155 #
156 156 # All in all, checking against the working copy seems sensible
157 157 # since we want to prevent access to nested repositories on
158 158 # the filesystem *now*.
159 159 ctx = self[None]
160 160 parts = util.splitpath(subpath)
161 161 while parts:
162 162 prefix = '/'.join(parts)
163 163 if prefix in ctx.substate:
164 164 if prefix == normsubpath:
165 165 return True
166 166 else:
167 167 sub = ctx.sub(prefix)
168 168 return sub.checknested(subpath[len(prefix) + 1:])
169 169 else:
170 170 parts.pop()
171 171 return False
172 172
173 173 @filecache('bookmarks')
174 174 def _bookmarks(self):
175 175 return bookmarks.read(self)
176 176
177 177 @filecache('bookmarks.current')
178 178 def _bookmarkcurrent(self):
179 179 return bookmarks.readcurrent(self)
180 180
181 181 def _writebookmarks(self, marks):
182 182 bookmarks.write(self)
183 183
184 184 @storecache('phaseroots')
185 185 def _phaseroots(self):
186 186 self._dirtyphases = False
187 187 phaseroots = phases.readroots(self)
188 188 phases.filterunknown(self, phaseroots)
189 189 return phaseroots
190 190
191 191 @propertycache
192 192 def _phaserev(self):
193 193 cache = [phases.public] * len(self)
194 194 for phase in phases.trackedphases:
195 195 roots = map(self.changelog.rev, self._phaseroots[phase])
196 196 if roots:
197 197 for rev in roots:
198 198 cache[rev] = phase
199 199 for rev in self.changelog.descendants(*roots):
200 200 cache[rev] = phase
201 201 return cache
202 202
203 203 @storecache('00changelog.i')
204 204 def changelog(self):
205 205 c = changelog.changelog(self.sopener)
206 206 if 'HG_PENDING' in os.environ:
207 207 p = os.environ['HG_PENDING']
208 208 if p.startswith(self.root):
209 209 c.readpending('00changelog.i.a')
210 210 return c
211 211
212 212 @storecache('00manifest.i')
213 213 def manifest(self):
214 214 return manifest.manifest(self.sopener)
215 215
216 216 @filecache('dirstate')
217 217 def dirstate(self):
218 218 warned = [0]
219 219 def validate(node):
220 220 try:
221 221 self.changelog.rev(node)
222 222 return node
223 223 except error.LookupError:
224 224 if not warned[0]:
225 225 warned[0] = True
226 226 self.ui.warn(_("warning: ignoring unknown"
227 227 " working parent %s!\n") % short(node))
228 228 return nullid
229 229
230 230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 231
232 232 def __getitem__(self, changeid):
233 233 if changeid is None:
234 234 return context.workingctx(self)
235 235 return context.changectx(self, changeid)
236 236
237 237 def __contains__(self, changeid):
238 238 try:
239 239 return bool(self.lookup(changeid))
240 240 except error.RepoLookupError:
241 241 return False
242 242
243 243 def __nonzero__(self):
244 244 return True
245 245
246 246 def __len__(self):
247 247 return len(self.changelog)
248 248
249 249 def __iter__(self):
250 250 for i in xrange(len(self)):
251 251 yield i
252 252
253 253 def revs(self, expr, *args):
254 254 '''Return a list of revisions matching the given revset'''
255 255 expr = revset.formatspec(expr, *args)
256 256 m = revset.match(None, expr)
257 257 return [r for r in m(self, range(len(self)))]
258 258
259 259 def set(self, expr, *args):
260 260 '''
261 261 Yield a context for each matching revision, after doing arg
262 262 replacement via revset.formatspec
263 263 '''
264 264 for r in self.revs(expr, *args):
265 265 yield self[r]
266 266
267 267 def url(self):
268 268 return 'file:' + self.root
269 269
270 270 def hook(self, name, throw=False, **args):
271 271 return hook.hook(self.ui, self, name, throw, **args)
272 272
273 273 tag_disallowed = ':\r\n'
274 274
275 275 def _tag(self, names, node, message, local, user, date, extra={}):
276 276 if isinstance(names, str):
277 277 allchars = names
278 278 names = (names,)
279 279 else:
280 280 allchars = ''.join(names)
281 281 for c in self.tag_disallowed:
282 282 if c in allchars:
283 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 284
285 285 branches = self.branchmap()
286 286 for name in names:
287 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 288 local=local)
289 289 if name in branches:
290 290 self.ui.warn(_("warning: tag %s conflicts with existing"
291 291 " branch name\n") % name)
292 292
293 293 def writetags(fp, names, munge, prevtags):
294 294 fp.seek(0, 2)
295 295 if prevtags and prevtags[-1] != '\n':
296 296 fp.write('\n')
297 297 for name in names:
298 298 m = munge and munge(name) or name
299 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 300 old = self.tags().get(name, nullid)
301 301 fp.write('%s %s\n' % (hex(old), m))
302 302 fp.write('%s %s\n' % (hex(node), m))
303 303 fp.close()
304 304
305 305 prevtags = ''
306 306 if local:
307 307 try:
308 308 fp = self.opener('localtags', 'r+')
309 309 except IOError:
310 310 fp = self.opener('localtags', 'a')
311 311 else:
312 312 prevtags = fp.read()
313 313
314 314 # local tags are stored in the current charset
315 315 writetags(fp, names, None, prevtags)
316 316 for name in names:
317 317 self.hook('tag', node=hex(node), tag=name, local=local)
318 318 return
319 319
320 320 try:
321 321 fp = self.wfile('.hgtags', 'rb+')
322 322 except IOError, e:
323 323 if e.errno != errno.ENOENT:
324 324 raise
325 325 fp = self.wfile('.hgtags', 'ab')
326 326 else:
327 327 prevtags = fp.read()
328 328
329 329 # committed tags are stored in UTF-8
330 330 writetags(fp, names, encoding.fromlocal, prevtags)
331 331
332 332 fp.close()
333 333
334 334 self.invalidatecaches()
335 335
336 336 if '.hgtags' not in self.dirstate:
337 337 self[None].add(['.hgtags'])
338 338
339 339 m = matchmod.exact(self.root, '', ['.hgtags'])
340 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 341
342 342 for name in names:
343 343 self.hook('tag', node=hex(node), tag=name, local=local)
344 344
345 345 return tagnode
346 346
347 347 def tag(self, names, node, message, local, user, date):
348 348 '''tag a revision with one or more symbolic names.
349 349
350 350 names is a list of strings or, when adding a single tag, names may be a
351 351 string.
352 352
353 353 if local is True, the tags are stored in a per-repository file.
354 354 otherwise, they are stored in the .hgtags file, and a new
355 355 changeset is committed with the change.
356 356
357 357 keyword arguments:
358 358
359 359 local: whether to store tags in non-version-controlled file
360 360 (default False)
361 361
362 362 message: commit message to use if committing
363 363
364 364 user: name of user to use if committing
365 365
366 366 date: date tuple to use if committing'''
367 367
368 368 if not local:
369 369 for x in self.status()[:5]:
370 370 if '.hgtags' in x:
371 371 raise util.Abort(_('working copy of .hgtags is changed '
372 372 '(please commit .hgtags manually)'))
373 373
374 374 self.tags() # instantiate the cache
375 375 self._tag(names, node, message, local, user, date)
376 376
377 377 @propertycache
378 378 def _tagscache(self):
379 379 '''Returns a tagscache object that contains various tag-related caches.'''
380 380
381 381 # This simplifies its cache management by having one decorated
382 382 # function (this one) and the rest simply fetch things from it.
383 383 class tagscache(object):
384 384 def __init__(self):
385 385 # These two define the set of tags for this repository. tags
386 386 # maps tag name to node; tagtypes maps tag name to 'global' or
387 387 # 'local'. (Global tags are defined by .hgtags across all
388 388 # heads, and local tags are defined in .hg/localtags.)
389 389 # They constitute the in-memory cache of tags.
390 390 self.tags = self.tagtypes = None
391 391
392 392 self.nodetagscache = self.tagslist = None
393 393
394 394 cache = tagscache()
395 395 cache.tags, cache.tagtypes = self._findtags()
396 396
397 397 return cache
398 398
399 399 def tags(self):
400 400 '''return a mapping of tag to node'''
401 401 return self._tagscache.tags
402 402
403 403 def _findtags(self):
404 404 '''Do the hard work of finding tags. Return a pair of dicts
405 405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
406 406 maps tag name to a string like \'global\' or \'local\'.
407 407 Subclasses or extensions are free to add their own tags, but
408 408 should be aware that the returned dicts will be retained for the
409 409 duration of the localrepo object.'''
410 410
411 411 # XXX what tagtype should subclasses/extensions use? Currently
412 412 # mq and bookmarks add tags, but do not set the tagtype at all.
413 413 # Should each extension invent its own tag type? Should there
414 414 # be one tagtype for all such "virtual" tags? Or is the status
415 415 # quo fine?
416 416
417 417 alltags = {} # map tag name to (node, hist)
418 418 tagtypes = {}
419 419
420 420 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
421 421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
422 422
423 423 # Build the return dicts. Have to re-encode tag names because
424 424 # the tags module always uses UTF-8 (in order not to lose info
425 425 # writing to the cache), but the rest of Mercurial wants them in
426 426 # local encoding.
427 427 tags = {}
428 428 for (name, (node, hist)) in alltags.iteritems():
429 429 if node != nullid:
430 430 try:
431 431 # ignore tags to unknown nodes
432 432 self.changelog.lookup(node)
433 433 tags[encoding.tolocal(name)] = node
434 434 except error.LookupError:
435 435 pass
436 436 tags['tip'] = self.changelog.tip()
437 437 tagtypes = dict([(encoding.tolocal(name), value)
438 438 for (name, value) in tagtypes.iteritems()])
439 439 return (tags, tagtypes)
440 440
441 441 def tagtype(self, tagname):
442 442 '''
443 443 return the type of the given tag. result can be:
444 444
445 445 'local' : a local tag
446 446 'global' : a global tag
447 447 None : tag does not exist
448 448 '''
449 449
450 450 return self._tagscache.tagtypes.get(tagname)
451 451
452 452 def tagslist(self):
453 453 '''return a list of tags ordered by revision'''
454 454 if not self._tagscache.tagslist:
455 455 l = []
456 456 for t, n in self.tags().iteritems():
457 457 r = self.changelog.rev(n)
458 458 l.append((r, t, n))
459 459 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
460 460
461 461 return self._tagscache.tagslist
462 462
463 463 def nodetags(self, node):
464 464 '''return the tags associated with a node'''
465 465 if not self._tagscache.nodetagscache:
466 466 nodetagscache = {}
467 467 for t, n in self.tags().iteritems():
468 468 nodetagscache.setdefault(n, []).append(t)
469 469 for tags in nodetagscache.itervalues():
470 470 tags.sort()
471 471 self._tagscache.nodetagscache = nodetagscache
472 472 return self._tagscache.nodetagscache.get(node, [])
473 473
474 474 def nodebookmarks(self, node):
475 475 marks = []
476 476 for bookmark, n in self._bookmarks.iteritems():
477 477 if n == node:
478 478 marks.append(bookmark)
479 479 return sorted(marks)
480 480
481 481 def _branchtags(self, partial, lrev):
482 482 # TODO: rename this function?
483 483 tiprev = len(self) - 1
484 484 if lrev != tiprev:
485 485 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
486 486 self._updatebranchcache(partial, ctxgen)
487 487 self._writebranchcache(partial, self.changelog.tip(), tiprev)
488 488
489 489 return partial
490 490
491 491 def updatebranchcache(self):
492 492 tip = self.changelog.tip()
493 493 if self._branchcache is not None and self._branchcachetip == tip:
494 494 return
495 495
496 496 oldtip = self._branchcachetip
497 497 self._branchcachetip = tip
498 498 if oldtip is None or oldtip not in self.changelog.nodemap:
499 499 partial, last, lrev = self._readbranchcache()
500 500 else:
501 501 lrev = self.changelog.rev(oldtip)
502 502 partial = self._branchcache
503 503
504 504 self._branchtags(partial, lrev)
505 505 # this private cache holds all heads (not just tips)
506 506 self._branchcache = partial
507 507
508 508 def branchmap(self):
509 509 '''returns a dictionary {branch: [branchheads]}'''
510 510 self.updatebranchcache()
511 511 return self._branchcache
512 512
513 513 def branchtags(self):
514 514 '''return a dict where branch names map to the tipmost head of
515 515 the branch, open heads come before closed'''
516 516 bt = {}
517 517 for bn, heads in self.branchmap().iteritems():
518 518 tip = heads[-1]
519 519 for h in reversed(heads):
520 520 if 'close' not in self.changelog.read(h)[5]:
521 521 tip = h
522 522 break
523 523 bt[bn] = tip
524 524 return bt
525 525
526 526 def _readbranchcache(self):
527 527 partial = {}
528 528 try:
529 529 f = self.opener("cache/branchheads")
530 530 lines = f.read().split('\n')
531 531 f.close()
532 532 except (IOError, OSError):
533 533 return {}, nullid, nullrev
534 534
535 535 try:
536 536 last, lrev = lines.pop(0).split(" ", 1)
537 537 last, lrev = bin(last), int(lrev)
538 538 if lrev >= len(self) or self[lrev].node() != last:
539 539 # invalidate the cache
540 540 raise ValueError('invalidating branch cache (tip differs)')
541 541 for l in lines:
542 542 if not l:
543 543 continue
544 544 node, label = l.split(" ", 1)
545 545 label = encoding.tolocal(label.strip())
546 546 partial.setdefault(label, []).append(bin(node))
547 547 except KeyboardInterrupt:
548 548 raise
549 549 except Exception, inst:
550 550 if self.ui.debugflag:
551 551 self.ui.warn(str(inst), '\n')
552 552 partial, last, lrev = {}, nullid, nullrev
553 553 return partial, last, lrev
554 554
555 555 def _writebranchcache(self, branches, tip, tiprev):
556 556 try:
557 557 f = self.opener("cache/branchheads", "w", atomictemp=True)
558 558 f.write("%s %s\n" % (hex(tip), tiprev))
559 559 for label, nodes in branches.iteritems():
560 560 for node in nodes:
561 561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
562 562 f.close()
563 563 except (IOError, OSError):
564 564 pass
565 565
566 566 def _updatebranchcache(self, partial, ctxgen):
567 567 # collect new branch entries
568 568 newbranches = {}
569 569 for c in ctxgen:
570 570 newbranches.setdefault(c.branch(), []).append(c.node())
571 571 # if older branchheads are reachable from new ones, they aren't
572 572 # really branchheads. Note checking parents is insufficient:
573 573 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
574 574 for branch, newnodes in newbranches.iteritems():
575 575 bheads = partial.setdefault(branch, [])
576 576 bheads.extend(newnodes)
577 577 if len(bheads) <= 1:
578 578 continue
579 579 bheads = sorted(bheads, key=lambda x: self[x].rev())
580 580 # starting from tip means fewer passes over reachable
581 581 while newnodes:
582 582 latest = newnodes.pop()
583 583 if latest not in bheads:
584 584 continue
585 585 minbhrev = self[bheads[0]].node()
586 586 reachable = self.changelog.reachable(latest, minbhrev)
587 587 reachable.remove(latest)
588 588 if reachable:
589 589 bheads = [b for b in bheads if b not in reachable]
590 590 partial[branch] = bheads
591 591
592 592 def lookup(self, key):
593 593 if isinstance(key, int):
594 594 return self.changelog.node(key)
595 595 elif key == '.':
596 596 return self.dirstate.p1()
597 597 elif key == 'null':
598 598 return nullid
599 599 elif key == 'tip':
600 600 return self.changelog.tip()
601 601 n = self.changelog._match(key)
602 602 if n:
603 603 return n
604 604 if key in self._bookmarks:
605 605 return self._bookmarks[key]
606 606 if key in self.tags():
607 607 return self.tags()[key]
608 608 if key in self.branchtags():
609 609 return self.branchtags()[key]
610 610 n = self.changelog._partialmatch(key)
611 611 if n:
612 612 return n
613 613
614 614 # can't find key, check if it might have come from damaged dirstate
615 615 if key in self.dirstate.parents():
616 616 raise error.Abort(_("working directory has unknown parent '%s'!")
617 617 % short(key))
618 618 try:
619 619 if len(key) == 20:
620 620 key = hex(key)
621 621 except TypeError:
622 622 pass
623 623 raise error.RepoLookupError(_("unknown revision '%s'") % key)
624 624
625 625 def lookupbranch(self, key, remote=None):
626 626 repo = remote or self
627 627 if key in repo.branchmap():
628 628 return key
629 629
630 630 repo = (remote and remote.local()) and remote or self
631 631 return repo[key].branch()
632 632
633 633 def known(self, nodes):
634 634 nm = self.changelog.nodemap
635 635 result = []
636 636 for n in nodes:
637 637 r = nm.get(n)
638 638 resp = not (r is None or self._phaserev[r] >= phases.secret)
639 639 result.append(resp)
640 640 return result
641 641
642 642 def local(self):
643 643 return self
644 644
645 645 def join(self, f):
646 646 return os.path.join(self.path, f)
647 647
648 648 def wjoin(self, f):
649 649 return os.path.join(self.root, f)
650 650
651 651 def file(self, f):
652 652 if f[0] == '/':
653 653 f = f[1:]
654 654 return filelog.filelog(self.sopener, f)
655 655
656 656 def changectx(self, changeid):
657 657 return self[changeid]
658 658
659 659 def parents(self, changeid=None):
660 660 '''get list of changectxs for parents of changeid'''
661 661 return self[changeid].parents()
662 662
663 663 def filectx(self, path, changeid=None, fileid=None):
664 664 """changeid can be a changeset revision, node, or tag.
665 665 fileid can be a file revision or node."""
666 666 return context.filectx(self, path, changeid, fileid)
667 667
668 668 def getcwd(self):
669 669 return self.dirstate.getcwd()
670 670
671 671 def pathto(self, f, cwd=None):
672 672 return self.dirstate.pathto(f, cwd)
673 673
674 674 def wfile(self, f, mode='r'):
675 675 return self.wopener(f, mode)
676 676
677 677 def _link(self, f):
678 678 return os.path.islink(self.wjoin(f))
679 679
680 680 def _loadfilter(self, filter):
681 681 if filter not in self.filterpats:
682 682 l = []
683 683 for pat, cmd in self.ui.configitems(filter):
684 684 if cmd == '!':
685 685 continue
686 686 mf = matchmod.match(self.root, '', [pat])
687 687 fn = None
688 688 params = cmd
689 689 for name, filterfn in self._datafilters.iteritems():
690 690 if cmd.startswith(name):
691 691 fn = filterfn
692 692 params = cmd[len(name):].lstrip()
693 693 break
694 694 if not fn:
695 695 fn = lambda s, c, **kwargs: util.filter(s, c)
696 696 # Wrap old filters not supporting keyword arguments
697 697 if not inspect.getargspec(fn)[2]:
698 698 oldfn = fn
699 699 fn = lambda s, c, **kwargs: oldfn(s, c)
700 700 l.append((mf, fn, params))
701 701 self.filterpats[filter] = l
702 702 return self.filterpats[filter]
703 703
704 704 def _filter(self, filterpats, filename, data):
705 705 for mf, fn, cmd in filterpats:
706 706 if mf(filename):
707 707 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
708 708 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
709 709 break
710 710
711 711 return data
712 712
713 713 @propertycache
714 714 def _encodefilterpats(self):
715 715 return self._loadfilter('encode')
716 716
717 717 @propertycache
718 718 def _decodefilterpats(self):
719 719 return self._loadfilter('decode')
720 720
721 721 def adddatafilter(self, name, filter):
722 722 self._datafilters[name] = filter
723 723
724 724 def wread(self, filename):
725 725 if self._link(filename):
726 726 data = os.readlink(self.wjoin(filename))
727 727 else:
728 728 data = self.wopener.read(filename)
729 729 return self._filter(self._encodefilterpats, filename, data)
730 730
731 731 def wwrite(self, filename, data, flags):
732 732 data = self._filter(self._decodefilterpats, filename, data)
733 733 if 'l' in flags:
734 734 self.wopener.symlink(data, filename)
735 735 else:
736 736 self.wopener.write(filename, data)
737 737 if 'x' in flags:
738 738 util.setflags(self.wjoin(filename), False, True)
739 739
740 740 def wwritedata(self, filename, data):
741 741 return self._filter(self._decodefilterpats, filename, data)
742 742
743 743 def transaction(self, desc):
744 744 tr = self._transref and self._transref() or None
745 745 if tr and tr.running():
746 746 return tr.nest()
747 747
748 748 # abort here if the journal already exists
749 749 if os.path.exists(self.sjoin("journal")):
750 750 raise error.RepoError(
751 751 _("abandoned transaction found - run hg recover"))
752 752
753 753 journalfiles = self._writejournal(desc)
754 754 renames = [(x, undoname(x)) for x in journalfiles]
755 755
756 756 tr = transaction.transaction(self.ui.warn, self.sopener,
757 757 self.sjoin("journal"),
758 758 aftertrans(renames),
759 759 self.store.createmode)
760 760 self._transref = weakref.ref(tr)
761 761 return tr
762 762
763 763 def _writejournal(self, desc):
764 764 # save dirstate for rollback
765 765 try:
766 766 ds = self.opener.read("dirstate")
767 767 except IOError:
768 768 ds = ""
769 769 self.opener.write("journal.dirstate", ds)
770 770 self.opener.write("journal.branch",
771 771 encoding.fromlocal(self.dirstate.branch()))
772 772 self.opener.write("journal.desc",
773 773 "%d\n%s\n" % (len(self), desc))
774 774
775 775 bkname = self.join('bookmarks')
776 776 if os.path.exists(bkname):
777 777 util.copyfile(bkname, self.join('journal.bookmarks'))
778 778 else:
779 779 self.opener.write('journal.bookmarks', '')
780 780 phasesname = self.sjoin('phaseroots')
781 781 if os.path.exists(phasesname):
782 782 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
783 783 else:
784 784 self.sopener.write('journal.phaseroots', '')
785 785
786 786 return (self.sjoin('journal'), self.join('journal.dirstate'),
787 787 self.join('journal.branch'), self.join('journal.desc'),
788 788 self.join('journal.bookmarks'),
789 789 self.sjoin('journal.phaseroots'))
790 790
791 791 def recover(self):
792 792 lock = self.lock()
793 793 try:
794 794 if os.path.exists(self.sjoin("journal")):
795 795 self.ui.status(_("rolling back interrupted transaction\n"))
796 796 transaction.rollback(self.sopener, self.sjoin("journal"),
797 797 self.ui.warn)
798 798 self.invalidate()
799 799 return True
800 800 else:
801 801 self.ui.warn(_("no interrupted transaction available\n"))
802 802 return False
803 803 finally:
804 804 lock.release()
805 805
806 806 def rollback(self, dryrun=False, force=False):
807 807 wlock = lock = None
808 808 try:
809 809 wlock = self.wlock()
810 810 lock = self.lock()
811 811 if os.path.exists(self.sjoin("undo")):
812 812 return self._rollback(dryrun, force)
813 813 else:
814 814 self.ui.warn(_("no rollback information available\n"))
815 815 return 1
816 816 finally:
817 817 release(lock, wlock)
818 818
819 819 def _rollback(self, dryrun, force):
820 820 ui = self.ui
821 821 try:
822 822 args = self.opener.read('undo.desc').splitlines()
823 823 (oldlen, desc, detail) = (int(args[0]), args[1], None)
824 824 if len(args) >= 3:
825 825 detail = args[2]
826 826 oldtip = oldlen - 1
827 827
828 828 if detail and ui.verbose:
829 829 msg = (_('repository tip rolled back to revision %s'
830 830 ' (undo %s: %s)\n')
831 831 % (oldtip, desc, detail))
832 832 else:
833 833 msg = (_('repository tip rolled back to revision %s'
834 834 ' (undo %s)\n')
835 835 % (oldtip, desc))
836 836 except IOError:
837 837 msg = _('rolling back unknown transaction\n')
838 838 desc = None
839 839
840 840 if not force and self['.'] != self['tip'] and desc == 'commit':
841 841 raise util.Abort(
842 842 _('rollback of last commit while not checked out '
843 843 'may lose data'), hint=_('use -f to force'))
844 844
845 845 ui.status(msg)
846 846 if dryrun:
847 847 return 0
848 848
849 849 parents = self.dirstate.parents()
850 850 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
851 851 if os.path.exists(self.join('undo.bookmarks')):
852 852 util.rename(self.join('undo.bookmarks'),
853 853 self.join('bookmarks'))
854 854 if os.path.exists(self.sjoin('undo.phaseroots')):
855 855 util.rename(self.sjoin('undo.phaseroots'),
856 856 self.sjoin('phaseroots'))
857 857 self.invalidate()
858 858
859 859 parentgone = (parents[0] not in self.changelog.nodemap or
860 860 parents[1] not in self.changelog.nodemap)
861 861 if parentgone:
862 862 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
863 863 try:
864 864 branch = self.opener.read('undo.branch')
865 865 self.dirstate.setbranch(branch)
866 866 except IOError:
867 867 ui.warn(_('named branch could not be reset: '
868 868 'current branch is still \'%s\'\n')
869 869 % self.dirstate.branch())
870 870
871 871 self.dirstate.invalidate()
872 872 parents = tuple([p.rev() for p in self.parents()])
873 873 if len(parents) > 1:
874 874 ui.status(_('working directory now based on '
875 875 'revisions %d and %d\n') % parents)
876 876 else:
877 877 ui.status(_('working directory now based on '
878 878 'revision %d\n') % parents)
879 879 self.destroyed()
880 880 return 0
881 881
882 882 def invalidatecaches(self):
883 883 def delcache(name):
884 884 try:
885 885 delattr(self, name)
886 886 except AttributeError:
887 887 pass
888 888
889 889 delcache('_tagscache')
890 890 delcache('_phaserev')
891 891
892 892 self._branchcache = None # in UTF-8
893 893 self._branchcachetip = None
894 894
895 895 def invalidatedirstate(self):
896 896 '''Invalidates the dirstate, causing the next call to dirstate
897 897 to check if it was modified since the last time it was read,
898 898 rereading it if it has been modified.
899 899
900 900 This is different from dirstate.invalidate() in that it doesn't
901 901 always reread the dirstate. Use dirstate.invalidate() if you want
902 902 to explicitly read the dirstate again (i.e. restoring it to a
903 903 previous known good state).'''
904 if 'dirstate' in self.__dict__:
905 for k in self.dirstate._filecache:
904 906 try:
905 delattr(self, 'dirstate')
907 delattr(self.dirstate, k)
906 908 except AttributeError:
907 909 pass
910 delattr(self, 'dirstate')
908 911
909 912 def invalidate(self):
910 913 for k in self._filecache:
911 914 # dirstate is invalidated separately in invalidatedirstate()
912 915 if k == 'dirstate':
913 916 continue
914 917
915 918 try:
916 919 delattr(self, k)
917 920 except AttributeError:
918 921 pass
919 922 self.invalidatecaches()
920 923
921 924 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
922 925 try:
923 926 l = lock.lock(lockname, 0, releasefn, desc=desc)
924 927 except error.LockHeld, inst:
925 928 if not wait:
926 929 raise
927 930 self.ui.warn(_("waiting for lock on %s held by %r\n") %
928 931 (desc, inst.locker))
929 932 # default to 600 seconds timeout
930 933 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
931 934 releasefn, desc=desc)
932 935 if acquirefn:
933 936 acquirefn()
934 937 return l
935 938
936 939 def _afterlock(self, callback):
937 940 """add a callback to the current repository lock.
938 941
939 942 The callback will be executed on lock release."""
940 943 l = self._lockref and self._lockref()
941 944 if l:
942 945 l.postrelease.append(callback)
943 946
944 947 def lock(self, wait=True):
945 948 '''Lock the repository store (.hg/store) and return a weak reference
946 949 to the lock. Use this before modifying the store (e.g. committing or
947 950 stripping). If you are opening a transaction, get a lock as well.'''
948 951 l = self._lockref and self._lockref()
949 952 if l is not None and l.held:
950 953 l.lock()
951 954 return l
952 955
953 956 def unlock():
954 957 self.store.write()
955 958 if self._dirtyphases:
956 959 phases.writeroots(self)
957 960 self._dirtyphases = False
958 961 for k, ce in self._filecache.items():
959 962 if k == 'dirstate':
960 963 continue
961 964 ce.refresh()
962 965
963 966 l = self._lock(self.sjoin("lock"), wait, unlock,
964 967 self.invalidate, _('repository %s') % self.origroot)
965 968 self._lockref = weakref.ref(l)
966 969 return l
967 970
968 971 def wlock(self, wait=True):
969 972 '''Lock the non-store parts of the repository (everything under
970 973 .hg except .hg/store) and return a weak reference to the lock.
971 974 Use this before modifying files in .hg.'''
972 975 l = self._wlockref and self._wlockref()
973 976 if l is not None and l.held:
974 977 l.lock()
975 978 return l
976 979
977 980 def unlock():
978 981 self.dirstate.write()
979 982 ce = self._filecache.get('dirstate')
980 983 if ce:
981 984 ce.refresh()
982 985
983 986 l = self._lock(self.join("wlock"), wait, unlock,
984 987 self.invalidatedirstate, _('working directory of %s') %
985 988 self.origroot)
986 989 self._wlockref = weakref.ref(l)
987 990 return l
988 991
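    # Editor's example: status() below takes the non-blocking form so that
    # dirstate fixups stay best-effort:
    #
    #     try:
    #         wlock = repo.wlock(False)    # raises LockError if already held
    #         try:
    #             repo.dirstate.normal(f)  # 'f' is some known-clean file
    #         finally:
    #             wlock.release()
    #     except error.LockError:
    #         pass                         # someone else holds the lock
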
989 992 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
990 993 """
991 994 commit an individual file as part of a larger transaction
992 995 """
993 996
994 997 fname = fctx.path()
995 998 text = fctx.data()
996 999 flog = self.file(fname)
997 1000 fparent1 = manifest1.get(fname, nullid)
998 1001 fparent2 = fparent2o = manifest2.get(fname, nullid)
999 1002
1000 1003 meta = {}
1001 1004 copy = fctx.renamed()
1002 1005 if copy and copy[0] != fname:
1003 1006 # Mark the new revision of this file as a copy of another
1004 1007 # file. This copy data will effectively act as a parent
1005 1008 # of this new revision. If this is a merge, the first
1006 1009 # parent will be the nullid (meaning "look up the copy data")
1007 1010 # and the second one will be the other parent. For example:
1008 1011 #
1009 1012 # 0 --- 1 --- 3 rev1 changes file foo
1010 1013 # \ / rev2 renames foo to bar and changes it
1011 1014 # \- 2 -/ rev3 should have bar with all changes and
1012 1015 # should record that bar descends from
1013 1016 # bar in rev2 and foo in rev1
1014 1017 #
1015 1018 # this allows this merge to succeed:
1016 1019 #
1017 1020 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1018 1021 # \ / merging rev3 and rev4 should use bar@rev2
1019 1022 # \- 2 --- 4 as the merge base
1020 1023 #
1021 1024
1022 1025 cfname = copy[0]
1023 1026 crev = manifest1.get(cfname)
1024 1027 newfparent = fparent2
1025 1028
1026 1029 if manifest2: # branch merge
1027 1030 if fparent2 == nullid or crev is None: # copied on remote side
1028 1031 if cfname in manifest2:
1029 1032 crev = manifest2[cfname]
1030 1033 newfparent = fparent1
1031 1034
1032 1035 # find source in nearest ancestor if we've lost track
1033 1036 if not crev:
1034 1037 self.ui.debug(" %s: searching for copy revision for %s\n" %
1035 1038 (fname, cfname))
1036 1039 for ancestor in self[None].ancestors():
1037 1040 if cfname in ancestor:
1038 1041 crev = ancestor[cfname].filenode()
1039 1042 break
1040 1043
1041 1044 if crev:
1042 1045 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1043 1046 meta["copy"] = cfname
1044 1047 meta["copyrev"] = hex(crev)
1045 1048 fparent1, fparent2 = nullid, newfparent
1046 1049 else:
1047 1050 self.ui.warn(_("warning: can't find ancestor for '%s' "
1048 1051 "copied from '%s'!\n") % (fname, cfname))
1049 1052
1050 1053 elif fparent2 != nullid:
1051 1054 # is one parent an ancestor of the other?
1052 1055 fparentancestor = flog.ancestor(fparent1, fparent2)
1053 1056 if fparentancestor == fparent1:
1054 1057 fparent1, fparent2 = fparent2, nullid
1055 1058 elif fparentancestor == fparent2:
1056 1059 fparent2 = nullid
1057 1060
1058 1061 # is the file changed?
1059 1062 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1060 1063 changelist.append(fname)
1061 1064 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1062 1065
1063 1066 # are just the flags changed during merge?
1064 1067 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1065 1068 changelist.append(fname)
1066 1069
1067 1070 return fparent1
1068 1071
1069 1072 def commit(self, text="", user=None, date=None, match=None, force=False,
1070 1073 editor=False, extra={}):
1071 1074 """Add a new revision to current repository.
1072 1075
1073 1076 Revision information is gathered from the working directory,
1074 1077 match can be used to filter the committed files. If editor is
1075 1078 supplied, it is called to get a commit message.
1076 1079 """
1077 1080
1078 1081 def fail(f, msg):
1079 1082 raise util.Abort('%s: %s' % (f, msg))
1080 1083
1081 1084 if not match:
1082 1085 match = matchmod.always(self.root, '')
1083 1086
1084 1087 if not force:
1085 1088 vdirs = []
1086 1089 match.dir = vdirs.append
1087 1090 match.bad = fail
1088 1091
1089 1092 wlock = self.wlock()
1090 1093 try:
1091 1094 wctx = self[None]
1092 1095 merge = len(wctx.parents()) > 1
1093 1096
1094 1097 if (not force and merge and match and
1095 1098 (match.files() or match.anypats())):
1096 1099 raise util.Abort(_('cannot partially commit a merge '
1097 1100 '(do not specify files or patterns)'))
1098 1101
1099 1102 changes = self.status(match=match, clean=force)
1100 1103 if force:
1101 1104 changes[0].extend(changes[6]) # mq may commit unchanged files
1102 1105
1103 1106 # check subrepos
1104 1107 subs = []
1105 1108 removedsubs = set()
1106 1109 if '.hgsub' in wctx:
1107 1110 # only manage subrepos and .hgsubstate if .hgsub is present
1108 1111 for p in wctx.parents():
1109 1112 removedsubs.update(s for s in p.substate if match(s))
1110 1113 for s in wctx.substate:
1111 1114 removedsubs.discard(s)
1112 1115 if match(s) and wctx.sub(s).dirty():
1113 1116 subs.append(s)
1114 1117 if (subs or removedsubs):
1115 1118 if (not match('.hgsub') and
1116 1119 '.hgsub' in (wctx.modified() + wctx.added())):
1117 1120 raise util.Abort(
1118 1121 _("can't commit subrepos without .hgsub"))
1119 1122 if '.hgsubstate' not in changes[0]:
1120 1123 changes[0].insert(0, '.hgsubstate')
1121 1124 if '.hgsubstate' in changes[2]:
1122 1125 changes[2].remove('.hgsubstate')
1123 1126 elif '.hgsub' in changes[2]:
1124 1127 # clean up .hgsubstate when .hgsub is removed
1125 1128 if ('.hgsubstate' in wctx and
1126 1129 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1127 1130 changes[2].insert(0, '.hgsubstate')
1128 1131
1129 1132 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1130 1133 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1131 1134 if changedsubs:
1132 1135 raise util.Abort(_("uncommitted changes in subrepo %s")
1133 1136 % changedsubs[0],
1134 1137 hint=_("use --subrepos for recursive commit"))
1135 1138
1136 1139 # make sure all explicit patterns are matched
1137 1140 if not force and match.files():
1138 1141 matched = set(changes[0] + changes[1] + changes[2])
1139 1142
1140 1143 for f in match.files():
1141 1144 if f == '.' or f in matched or f in wctx.substate:
1142 1145 continue
1143 1146 if f in changes[3]: # missing
1144 1147 fail(f, _('file not found!'))
1145 1148 if f in vdirs: # visited directory
1146 1149 d = f + '/'
1147 1150 for mf in matched:
1148 1151 if mf.startswith(d):
1149 1152 break
1150 1153 else:
1151 1154 fail(f, _("no match under directory!"))
1152 1155 elif f not in self.dirstate:
1153 1156 fail(f, _("file not tracked!"))
1154 1157
1155 1158 if (not force and not extra.get("close") and not merge
1156 1159 and not (changes[0] or changes[1] or changes[2])
1157 1160 and wctx.branch() == wctx.p1().branch()):
1158 1161 return None
1159 1162
1160 1163 ms = mergemod.mergestate(self)
1161 1164 for f in changes[0]:
1162 1165 if f in ms and ms[f] == 'u':
1163 1166 raise util.Abort(_("unresolved merge conflicts "
1164 1167 "(see hg help resolve)"))
1165 1168
1166 1169 cctx = context.workingctx(self, text, user, date, extra, changes)
1167 1170 if editor:
1168 1171 cctx._text = editor(self, cctx, subs)
1169 1172 edited = (text != cctx._text)
1170 1173
1171 1174 # commit subs
1172 1175 if subs or removedsubs:
1173 1176 state = wctx.substate.copy()
1174 1177 for s in sorted(subs):
1175 1178 sub = wctx.sub(s)
1176 1179 self.ui.status(_('committing subrepository %s\n') %
1177 1180 subrepo.subrelpath(sub))
1178 1181 sr = sub.commit(cctx._text, user, date)
1179 1182 state[s] = (state[s][0], sr)
1180 1183 subrepo.writestate(self, state)
1181 1184
1182 1185 # Save commit message in case this transaction gets rolled back
1183 1186 # (e.g. by a pretxncommit hook). Leave the content alone on
1184 1187 # the assumption that the user will use the same editor again.
1185 1188 msgfn = self.savecommitmessage(cctx._text)
1186 1189
1187 1190 p1, p2 = self.dirstate.parents()
1188 1191 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1189 1192 try:
1190 1193 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1191 1194 ret = self.commitctx(cctx, True)
1192 1195 except:
1193 1196 if edited:
1194 1197 self.ui.write(
1195 1198 _('note: commit message saved in %s\n') % msgfn)
1196 1199 raise
1197 1200
1198 1201 # update bookmarks, dirstate and mergestate
1199 1202 bookmarks.update(self, p1, ret)
1200 1203 for f in changes[0] + changes[1]:
1201 1204 self.dirstate.normal(f)
1202 1205 for f in changes[2]:
1203 1206 self.dirstate.drop(f)
1204 1207 self.dirstate.setparents(ret)
1205 1208 ms.reset()
1206 1209 finally:
1207 1210 wlock.release()
1208 1211
1209 1212 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1210 1213 return ret
1211 1214
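    # Editor's example (hypothetical arguments; matchmod.match(root, cwd,
    # patterns) is assumed to be available as elsewhere in Mercurial):
    #
    #     m = matchmod.match(repo.root, '', ['a.txt', 'b.txt'])
    #     node = repo.commit(text='update a and b',
    #                        user='me <me@example.com>', match=m)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
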
1212 1215 def commitctx(self, ctx, error=False):
1213 1216 """Add a new revision to current repository.
1214 1217 Revision information is passed via the context argument.
1215 1218 """
1216 1219
1217 1220 tr = lock = None
1218 1221 removed = list(ctx.removed())
1219 1222 p1, p2 = ctx.p1(), ctx.p2()
1220 1223 user = ctx.user()
1221 1224
1222 1225 lock = self.lock()
1223 1226 try:
1224 1227 tr = self.transaction("commit")
1225 1228 trp = weakref.proxy(tr)
1226 1229
1227 1230 if ctx.files():
1228 1231 m1 = p1.manifest().copy()
1229 1232 m2 = p2.manifest()
1230 1233
1231 1234 # check in files
1232 1235 new = {}
1233 1236 changed = []
1234 1237 linkrev = len(self)
1235 1238 for f in sorted(ctx.modified() + ctx.added()):
1236 1239 self.ui.note(f + "\n")
1237 1240 try:
1238 1241 fctx = ctx[f]
1239 1242 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1240 1243 changed)
1241 1244 m1.set(f, fctx.flags())
1242 1245 except OSError, inst:
1243 1246 self.ui.warn(_("trouble committing %s!\n") % f)
1244 1247 raise
1245 1248 except IOError, inst:
1246 1249 errcode = getattr(inst, 'errno', errno.ENOENT)
1247 1250 if error or errcode and errcode != errno.ENOENT:
1248 1251 self.ui.warn(_("trouble committing %s!\n") % f)
1249 1252 raise
1250 1253 else:
1251 1254 removed.append(f)
1252 1255
1253 1256 # update manifest
1254 1257 m1.update(new)
1255 1258 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1256 1259 drop = [f for f in removed if f in m1]
1257 1260 for f in drop:
1258 1261 del m1[f]
1259 1262 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1260 1263 p2.manifestnode(), (new, drop))
1261 1264 files = changed + removed
1262 1265 else:
1263 1266 mn = p1.manifestnode()
1264 1267 files = []
1265 1268
1266 1269 # update changelog
1267 1270 self.changelog.delayupdate()
1268 1271 n = self.changelog.add(mn, files, ctx.description(),
1269 1272 trp, p1.node(), p2.node(),
1270 1273 user, ctx.date(), ctx.extra().copy())
1271 1274 p = lambda: self.changelog.writepending() and self.root or ""
1272 1275 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1273 1276 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1274 1277 parent2=xp2, pending=p)
1275 1278 self.changelog.finalize(trp)
1276 1279 # set the new commit in its proper phase
1277 1280 targetphase = phases.newcommitphase(self.ui)
1278 1281 if targetphase:
1279 1282 # retracting the boundary does not alter parent changesets.
1280 1283 # if a parent has a higher phase, the resulting phase will
1281 1284 # be compliant anyway
1282 1285 #
1283 1286 # if minimal phase was 0 we don't need to retract anything
1284 1287 phases.retractboundary(self, targetphase, [n])
1285 1288 tr.close()
1286 1289 self.updatebranchcache()
1287 1290 return n
1288 1291 finally:
1289 1292 if tr:
1290 1293 tr.release()
1291 1294 lock.release()
1292 1295
1293 1296 def destroyed(self):
1294 1297 '''Inform the repository that nodes have been destroyed.
1295 1298 Intended for use by strip and rollback, so there's a common
1296 1299 place for anything that has to be done after destroying history.'''
1297 1300 # XXX it might be nice if we could take the list of destroyed
1298 1301 # nodes, but I don't see an easy way for rollback() to do that
1299 1302
1300 1303 # Ensure the persistent tag cache is updated. Doing it now
1301 1304 # means that the tag cache only has to worry about destroyed
1302 1305 # heads immediately after a strip/rollback. That in turn
1303 1306 # guarantees that "cachetip == currenttip" (comparing both rev
1304 1307 # and node) always means no nodes have been added or destroyed.
1305 1308
1306 1309 # XXX this is suboptimal when qrefresh'ing: we strip the current
1307 1310 # head, refresh the tag cache, then immediately add a new head.
1308 1311 # But I think doing it this way is necessary for the "instant
1309 1312 # tag cache retrieval" case to work.
1310 1313 self.invalidatecaches()
1311 1314
1312 1315 # Discard all cache entries to force reloading everything.
1313 1316 self._filecache.clear()
1314 1317
1315 1318 def walk(self, match, node=None):
1316 1319 '''
1317 1320 walk recursively through the directory tree or a given
1318 1321 changeset, finding all files matched by the match
1319 1322 function
1320 1323 '''
1321 1324 return self[node].walk(match)
1322 1325
1323 1326 def status(self, node1='.', node2=None, match=None,
1324 1327 ignored=False, clean=False, unknown=False,
1325 1328 listsubrepos=False):
1326 1329 """return status of files between two nodes or node and working directory
1327 1330
1328 1331 If node1 is None, use the first dirstate parent instead.
1329 1332 If node2 is None, compare node1 with working directory.
1330 1333 """
1331 1334
1332 1335 def mfmatches(ctx):
1333 1336 mf = ctx.manifest().copy()
1334 1337 for fn in mf.keys():
1335 1338 if not match(fn):
1336 1339 del mf[fn]
1337 1340 return mf
1338 1341
1339 1342 if isinstance(node1, context.changectx):
1340 1343 ctx1 = node1
1341 1344 else:
1342 1345 ctx1 = self[node1]
1343 1346 if isinstance(node2, context.changectx):
1344 1347 ctx2 = node2
1345 1348 else:
1346 1349 ctx2 = self[node2]
1347 1350
1348 1351 working = ctx2.rev() is None
1349 1352 parentworking = working and ctx1 == self['.']
1350 1353 match = match or matchmod.always(self.root, self.getcwd())
1351 1354 listignored, listclean, listunknown = ignored, clean, unknown
1352 1355
1353 1356 # load earliest manifest first for caching reasons
1354 1357 if not working and ctx2.rev() < ctx1.rev():
1355 1358 ctx2.manifest()
1356 1359
1357 1360 if not parentworking:
1358 1361 def bad(f, msg):
1359 1362 # 'f' may be a directory pattern from 'match.files()',
1360 1363 # so 'f not in ctx1' is not enough
1361 1364 if f not in ctx1 and f not in ctx1.dirs():
1362 1365 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1363 1366 match.bad = bad
1364 1367
1365 1368 if working: # we need to scan the working dir
1366 1369 subrepos = []
1367 1370 if '.hgsub' in self.dirstate:
1368 1371 subrepos = ctx2.substate.keys()
1369 1372 s = self.dirstate.status(match, subrepos, listignored,
1370 1373 listclean, listunknown)
1371 1374 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1372 1375
1373 1376 # check for any possibly clean files
1374 1377 if parentworking and cmp:
1375 1378 fixup = []
1376 1379 # do a full compare of any files that might have changed
1377 1380 for f in sorted(cmp):
1378 1381 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1379 1382 or ctx1[f].cmp(ctx2[f])):
1380 1383 modified.append(f)
1381 1384 else:
1382 1385 fixup.append(f)
1383 1386
1384 1387 # update dirstate for files that are actually clean
1385 1388 if fixup:
1386 1389 if listclean:
1387 1390 clean += fixup
1388 1391
1389 1392 try:
1390 1393 # updating the dirstate is optional
1391 1394 # so we don't wait on the lock
1392 1395 wlock = self.wlock(False)
1393 1396 try:
1394 1397 for f in fixup:
1395 1398 self.dirstate.normal(f)
1396 1399 finally:
1397 1400 wlock.release()
1398 1401 except error.LockError:
1399 1402 pass
1400 1403
1401 1404 if not parentworking:
1402 1405 mf1 = mfmatches(ctx1)
1403 1406 if working:
1404 1407 # we are comparing working dir against non-parent
1405 1408 # generate a pseudo-manifest for the working dir
1406 1409 mf2 = mfmatches(self['.'])
1407 1410 for f in cmp + modified + added:
1408 1411 mf2[f] = None
1409 1412 mf2.set(f, ctx2.flags(f))
1410 1413 for f in removed:
1411 1414 if f in mf2:
1412 1415 del mf2[f]
1413 1416 else:
1414 1417 # we are comparing two revisions
1415 1418 deleted, unknown, ignored = [], [], []
1416 1419 mf2 = mfmatches(ctx2)
1417 1420
1418 1421 modified, added, clean = [], [], []
1419 1422 for fn in mf2:
1420 1423 if fn in mf1:
1421 1424 if (fn not in deleted and
1422 1425 (mf1.flags(fn) != mf2.flags(fn) or
1423 1426 (mf1[fn] != mf2[fn] and
1424 1427 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1425 1428 modified.append(fn)
1426 1429 elif listclean:
1427 1430 clean.append(fn)
1428 1431 del mf1[fn]
1429 1432 elif fn not in deleted:
1430 1433 added.append(fn)
1431 1434 removed = mf1.keys()
1432 1435
1433 1436 if working and modified and not self.dirstate._checklink:
1434 1437 # Symlink placeholders may get non-symlink-like contents
1435 1438 # via user error or dereferencing by NFS or Samba servers,
1436 1439 # so we filter out any placeholders that don't look like a
1437 1440 # symlink
1438 1441 sane = []
1439 1442 for f in modified:
1440 1443 if ctx2.flags(f) == 'l':
1441 1444 d = ctx2[f].data()
1442 1445 if len(d) >= 1024 or '\n' in d or util.binary(d):
1443 1446 self.ui.debug('ignoring suspect symlink placeholder'
1444 1447 ' "%s"\n' % f)
1445 1448 continue
1446 1449 sane.append(f)
1447 1450 modified = sane
1448 1451
1449 1452 r = modified, added, removed, deleted, unknown, ignored, clean
1450 1453
1451 1454 if listsubrepos:
1452 1455 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1453 1456 if working:
1454 1457 rev2 = None
1455 1458 else:
1456 1459 rev2 = ctx2.substate[subpath][1]
1457 1460 try:
1458 1461 submatch = matchmod.narrowmatcher(subpath, match)
1459 1462 s = sub.status(rev2, match=submatch, ignored=listignored,
1460 1463 clean=listclean, unknown=listunknown,
1461 1464 listsubrepos=True)
1462 1465 for rfiles, sfiles in zip(r, s):
1463 1466 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1464 1467 except error.LookupError:
1465 1468 self.ui.status(_("skipping missing subrepository: %s\n")
1466 1469 % subpath)
1467 1470
1468 1471 for l in r:
1469 1472 l.sort()
1470 1473 return r
1471 1474
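    # Editor's note: the 7-tuple returned above unpacks as follows; the
    # ignored/clean/unknown lists stay empty unless explicitly requested:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(ignored=True, clean=True, unknown=True)
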
1472 1475 def heads(self, start=None):
1473 1476 heads = self.changelog.heads(start)
1474 1477 # sort the output in rev descending order
1475 1478 return sorted(heads, key=self.changelog.rev, reverse=True)
1476 1479
1477 1480 def branchheads(self, branch=None, start=None, closed=False):
1478 1481 '''return a (possibly filtered) list of heads for the given branch
1479 1482
1480 1483 Heads are returned in topological order, from newest to oldest.
1481 1484 If branch is None, use the dirstate branch.
1482 1485 If start is not None, return only heads reachable from start.
1483 1486 If closed is True, return heads that are marked as closed as well.
1484 1487 '''
1485 1488 if branch is None:
1486 1489 branch = self[None].branch()
1487 1490 branches = self.branchmap()
1488 1491 if branch not in branches:
1489 1492 return []
1490 1493 # the cache returns heads ordered lowest to highest
1491 1494 bheads = list(reversed(branches[branch]))
1492 1495 if start is not None:
1493 1496 # filter out the heads that cannot be reached from startrev
1494 1497 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1495 1498 bheads = [h for h in bheads if h in fbheads]
1496 1499 if not closed:
1497 1500 bheads = [h for h in bheads if
1498 1501 ('close' not in self.changelog.read(h)[5])]
1499 1502 return bheads
1500 1503
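    # Editor's example:
    #
    #     bheads = repo.branchheads('default', closed=True)  # newest first
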
1501 1504 def branches(self, nodes):
1502 1505 if not nodes:
1503 1506 nodes = [self.changelog.tip()]
1504 1507 b = []
1505 1508 for n in nodes:
1506 1509 t = n
1507 1510 while True:
1508 1511 p = self.changelog.parents(n)
1509 1512 if p[1] != nullid or p[0] == nullid:
1510 1513 b.append((t, n, p[0], p[1]))
1511 1514 break
1512 1515 n = p[0]
1513 1516 return b
1514 1517
1515 1518 def between(self, pairs):
1516 1519 r = []
1517 1520
1518 1521 for top, bottom in pairs:
1519 1522 n, l, i = top, [], 0
1520 1523 f = 1
1521 1524
1522 1525 while n != bottom and n != nullid:
1523 1526 p = self.changelog.parents(n)[0]
1524 1527 if i == f:
1525 1528 l.append(n)
1526 1529 f = f * 2
1527 1530 n = p
1528 1531 i += 1
1529 1532
1530 1533 r.append(l)
1531 1534
1532 1535 return r
1533 1536
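    # Editor's note: for each (top, bottom) pair the loop above walks first
    # parents from top toward bottom, appending a node whenever the distance
    # counter i reaches f, which doubles each time; the result is a sample
    # of ancestors at distances 1, 2, 4, 8, ... from top, as used by the
    # legacy wire-protocol discovery.
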
1534 1537 def pull(self, remote, heads=None, force=False):
1535 1538 lock = self.lock()
1536 1539 try:
1537 1540 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1538 1541 force=force)
1539 1542 common, fetch, rheads = tmp
1540 1543 if not fetch:
1541 1544 self.ui.status(_("no changes found\n"))
1542 1545 added = []
1543 1546 result = 0
1544 1547 else:
1545 1548 if heads is None and list(common) == [nullid]:
1546 1549 self.ui.status(_("requesting all changes\n"))
1547 1550 elif heads is None and remote.capable('changegroupsubset'):
1548 1551 # issue1320, avoid a race if remote changed after discovery
1549 1552 heads = rheads
1550 1553
1551 1554 if remote.capable('getbundle'):
1552 1555 cg = remote.getbundle('pull', common=common,
1553 1556 heads=heads or rheads)
1554 1557 elif heads is None:
1555 1558 cg = remote.changegroup(fetch, 'pull')
1556 1559 elif not remote.capable('changegroupsubset'):
1557 1560 raise util.Abort(_("partial pull cannot be done because "
1558 1561 "other repository doesn't support "
1559 1562 "changegroupsubset."))
1560 1563 else:
1561 1564 cg = remote.changegroupsubset(fetch, heads, 'pull')
1562 1565 clstart = len(self.changelog)
1563 1566 result = self.addchangegroup(cg, 'pull', remote.url())
1564 1567 clend = len(self.changelog)
1565 1568 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1566 1569
1567 1570 # compute target subset
1568 1571 if heads is None:
1569 1572 # We pulled everything possible
1570 1573 # sync on everything common
1571 1574 subset = common + added
1572 1575 else:
1573 1576 # We pulled a specific subset
1574 1577 # sync on this subset
1575 1578 subset = heads
1576 1579
1577 1580 # Get remote phases data from remote
1578 1581 remotephases = remote.listkeys('phases')
1579 1582 publishing = bool(remotephases.get('publishing', False))
1580 1583 if remotephases and not publishing:
1581 1584 # remote is new and non-publishing
1582 1585 pheads, _dr = phases.analyzeremotephases(self, subset,
1583 1586 remotephases)
1584 1587 phases.advanceboundary(self, phases.public, pheads)
1585 1588 phases.advanceboundary(self, phases.draft, subset)
1586 1589 else:
1587 1590 # Remote is old or publishing all common changesets
1588 1591 # should be seen as public
1589 1592 phases.advanceboundary(self, phases.public, subset)
1590 1593 finally:
1591 1594 lock.release()
1592 1595
1593 1596 return result
1594 1597
1595 1598 def checkpush(self, force, revs):
1596 1599 """Extensions can override this function if additional checks have
1597 1600 to be performed before pushing, or call it if they override push
1598 1601 command.
1599 1602 """
1600 1603 pass
1601 1604
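    # Editor's example (hypothetical subclass): an extension could veto
    # pushes by overriding the hook point above, e.g.:
    #
    #     class strictrepo(localrepository):
    #         def checkpush(self, force, revs):
    #             if not force:
    #                 raise util.Abort('pushing here requires --force')
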
1602 1605 def push(self, remote, force=False, revs=None, newbranch=False):
1603 1606 '''Push outgoing changesets (limited by revs) from the current
1604 1607 repository to remote. Return an integer or None:
1605 1608 - None means nothing to push
1606 1609 - 0 means HTTP error
1607 1610 - 1 means we pushed and remote head count is unchanged *or*
1608 1611 we have outgoing changesets but refused to push
1609 1612 - other values as described by addchangegroup()
1610 1613 '''
1611 1614 # there are two ways to push to remote repo:
1612 1615 #
1613 1616 # addchangegroup assumes local user can lock remote
1614 1617 # repo (local filesystem, old ssh servers).
1615 1618 #
1616 1619 # unbundle assumes local user cannot lock remote repo (new ssh
1617 1620 # servers, http servers).
1618 1621
1619 1622 # get local lock as we might write phase data
1620 1623 locallock = self.lock()
1621 1624 try:
1622 1625 self.checkpush(force, revs)
1623 1626 lock = None
1624 1627 unbundle = remote.capable('unbundle')
1625 1628 if not unbundle:
1626 1629 lock = remote.lock()
1627 1630 try:
1628 1631 # discovery
1629 1632 fci = discovery.findcommonincoming
1630 1633 commoninc = fci(self, remote, force=force)
1631 1634 common, inc, remoteheads = commoninc
1632 1635 fco = discovery.findcommonoutgoing
1633 1636 outgoing = fco(self, remote, onlyheads=revs,
1634 1637 commoninc=commoninc, force=force)
1635 1638
1636 1639
1637 1640 if not outgoing.missing:
1638 1641 # nothing to push
1639 1642 scmutil.nochangesfound(self.ui, outgoing.excluded)
1640 1643 ret = None
1641 1644 else:
1642 1645 # something to push
1643 1646 if not force:
1644 1647 discovery.checkheads(self, remote, outgoing,
1645 1648 remoteheads, newbranch,
1646 1649 bool(inc))
1647 1650
1648 1651 # create a changegroup from local
1649 1652 if revs is None and not outgoing.excluded:
1650 1653 # push everything,
1651 1654 # use the fast path, no race possible on push
1652 1655 cg = self._changegroup(outgoing.missing, 'push')
1653 1656 else:
1654 1657 cg = self.getlocalbundle('push', outgoing)
1655 1658
1656 1659 # apply changegroup to remote
1657 1660 if unbundle:
1658 1661 # local repo finds heads on server, finds out what
1659 1662 # revs it must push. once revs transferred, if server
1660 1663 # finds it has different heads (someone else won
1661 1664 # commit/push race), server aborts.
1662 1665 if force:
1663 1666 remoteheads = ['force']
1664 1667 # ssh: return remote's addchangegroup()
1665 1668 # http: return remote's addchangegroup() or 0 for error
1666 1669 ret = remote.unbundle(cg, remoteheads, 'push')
1667 1670 else:
1668 1671 # we return an integer indicating remote head count change
1669 1672 ret = remote.addchangegroup(cg, 'push', self.url())
1670 1673
1671 1674 if ret:
1672 1675 # push succeeded, synchronize the target of the push
1673 1676 cheads = outgoing.missingheads
1674 1677 elif revs is None:
1675 1678 # the all-out push failed; synchronize all common changesets
1676 1679 cheads = outgoing.commonheads
1677 1680 else:
1678 1681 # I want cheads = heads(::missingheads and ::commonheads)
1679 1682 # (missingheads is revs with secret changesets filtered out)
1680 1683 #
1681 1684 # This can be expressed as:
1682 1685 # cheads = ( (missingheads and ::commonheads)
1683 1686 #          + (commonheads and ::missingheads)
1684 1687 #          )
1685 1688 #
1686 1689 # while trying to push we already computed the following:
1687 1690 # common = (::commonheads)
1688 1691 # missing = ((commonheads::missingheads) - commonheads)
1689 1692 #
1690 1693 # We can pick:
1691 1694 # * the missingheads part of common (::commonheads)
1692 1695 common = set(outgoing.common)
1693 1696 cheads = [node for node in revs if node in common]
1694 1697 # and
1695 1698 # * commonheads that are parents of roots of missing
1696 1699 revset = self.set('%ln and parents(roots(%ln))',
1697 1700 outgoing.commonheads,
1698 1701 outgoing.missing)
1699 1702 cheads.extend(c.node() for c in revset)
1700 1703 # even when we don't push, exchanging phase data is useful
1701 1704 remotephases = remote.listkeys('phases')
1702 1705 if not remotephases: # old server or public only repo
1703 1706 phases.advanceboundary(self, phases.public, cheads)
1704 1707 # don't push any phase data as there is nothing to push
1705 1708 else:
1706 1709 ana = phases.analyzeremotephases(self, cheads, remotephases)
1707 1710 pheads, droots = ana
1708 1711 ### Apply remote phase on local
1709 1712 if remotephases.get('publishing', False):
1710 1713 phases.advanceboundary(self, phases.public, cheads)
1711 1714 else: # publish = False
1712 1715 phases.advanceboundary(self, phases.public, pheads)
1713 1716 phases.advanceboundary(self, phases.draft, cheads)
1714 1717 ### Apply local phase on remote
1715 1718
1716 1719 # Get the list of all revs draft on remote by public here.
1717 1720 # XXX Beware that the revset breaks if droots is not strictly a
1718 1721 # XXX set of roots; we may want to ensure it is, but that is costly
1719 1722 outdated = self.set('heads((%ln::%ln) and public())',
1720 1723 droots, cheads)
1721 1724 for newremotehead in outdated:
1722 1725 r = remote.pushkey('phases',
1723 1726 newremotehead.hex(),
1724 1727 str(phases.draft),
1725 1728 str(phases.public))
1726 1729 if not r:
1727 1730 self.ui.warn(_('updating %s to public failed!\n')
1728 1731 % newremotehead)
1729 1732 finally:
1730 1733 if lock is not None:
1731 1734 lock.release()
1732 1735 finally:
1733 1736 locallock.release()
1734 1737
1735 1738 self.ui.debug("checking for updated bookmarks\n")
1736 1739 rb = remote.listkeys('bookmarks')
1737 1740 for k in rb.keys():
1738 1741 if k in self._bookmarks:
1739 1742 nr, nl = rb[k], hex(self._bookmarks[k])
1740 1743 if nr in self:
1741 1744 cr = self[nr]
1742 1745 cl = self[nl]
1743 1746 if cl in cr.descendants():
1744 1747 r = remote.pushkey('bookmarks', k, nr, nl)
1745 1748 if r:
1746 1749 self.ui.status(_("updating bookmark %s\n") % k)
1747 1750 else:
1748 1751 self.ui.warn(_('updating bookmark %s'
1749 1752 ' failed!\n') % k)
1750 1753
1751 1754 return ret
1752 1755
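    # Editor's example (interpreting the return values documented above):
    #
    #     ret = repo.push(other)
    #     if ret is None:
    #         repo.ui.status('nothing to push\n')
    #     elif ret == 0:
    #         repo.ui.warn('push failed (HTTP error)\n')
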
1753 1756 def changegroupinfo(self, nodes, source):
1754 1757 if self.ui.verbose or source == 'bundle':
1755 1758 self.ui.status(_("%d changesets found\n") % len(nodes))
1756 1759 if self.ui.debugflag:
1757 1760 self.ui.debug("list of changesets:\n")
1758 1761 for node in nodes:
1759 1762 self.ui.debug("%s\n" % hex(node))
1760 1763
1761 1764 def changegroupsubset(self, bases, heads, source):
1762 1765 """Compute a changegroup consisting of all the nodes that are
1763 1766 descendants of any of the bases and ancestors of any of the heads.
1764 1767 Return a chunkbuffer object whose read() method will return
1765 1768 successive changegroup chunks.
1766 1769
1767 1770 It is fairly complex as determining which filenodes and which
1768 1771 manifest nodes need to be included for the changeset to be complete
1769 1772 is non-trivial.
1770 1773
1771 1774 Another wrinkle is doing the reverse, figuring out which changeset in
1772 1775 the changegroup a particular filenode or manifestnode belongs to.
1773 1776 """
1774 1777 cl = self.changelog
1775 1778 if not bases:
1776 1779 bases = [nullid]
1777 1780 csets, bases, heads = cl.nodesbetween(bases, heads)
1778 1781 # We assume that all ancestors of bases are known
1779 1782 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1780 1783 return self._changegroupsubset(common, csets, heads, source)
1781 1784
1782 1785 def getlocalbundle(self, source, outgoing):
1783 1786 """Like getbundle, but taking a discovery.outgoing as an argument.
1784 1787
1785 1788 This is only implemented for local repos and reuses potentially
1786 1789 precomputed sets in outgoing."""
1787 1790 if not outgoing.missing:
1788 1791 return None
1789 1792 return self._changegroupsubset(outgoing.common,
1790 1793 outgoing.missing,
1791 1794 outgoing.missingheads,
1792 1795 source)
1793 1796
1794 1797 def getbundle(self, source, heads=None, common=None):
1795 1798 """Like changegroupsubset, but returns the set difference between the
1796 1799 ancestors of heads and the ancestors of common.
1797 1800
1798 1801 If heads is None, use the local heads. If common is None, use [nullid].
1799 1802
1800 1803 The nodes in common might not all be known locally due to the way the
1801 1804 current discovery protocol works.
1802 1805 """
1803 1806 cl = self.changelog
1804 1807 if common:
1805 1808 nm = cl.nodemap
1806 1809 common = [n for n in common if n in nm]
1807 1810 else:
1808 1811 common = [nullid]
1809 1812 if not heads:
1810 1813 heads = cl.heads()
1811 1814 return self.getlocalbundle(source,
1812 1815 discovery.outgoing(cl, common, heads))
1813 1816
1814 1817 def _changegroupsubset(self, commonrevs, csets, heads, source):
1815 1818
1816 1819 cl = self.changelog
1817 1820 mf = self.manifest
1818 1821 mfs = {} # needed manifests
1819 1822 fnodes = {} # needed file nodes
1820 1823 changedfiles = set()
1821 1824 fstate = ['', {}]
1822 1825 count = [0]
1823 1826
1824 1827 # can we go through the fast path?
1825 1828 heads.sort()
1826 1829 if heads == sorted(self.heads()):
1827 1830 return self._changegroup(csets, source)
1828 1831
1829 1832 # slow path
1830 1833 self.hook('preoutgoing', throw=True, source=source)
1831 1834 self.changegroupinfo(csets, source)
1832 1835
1833 1836 # filter any nodes that claim to be part of the known set
1834 1837 def prune(revlog, missing):
1835 1838 return [n for n in missing
1836 1839 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1837 1840
1838 1841 def lookup(revlog, x):
1839 1842 if revlog == cl:
1840 1843 c = cl.read(x)
1841 1844 changedfiles.update(c[3])
1842 1845 mfs.setdefault(c[0], x)
1843 1846 count[0] += 1
1844 1847 self.ui.progress(_('bundling'), count[0],
1845 1848 unit=_('changesets'), total=len(csets))
1846 1849 return x
1847 1850 elif revlog == mf:
1848 1851 clnode = mfs[x]
1849 1852 mdata = mf.readfast(x)
1850 1853 for f in changedfiles:
1851 1854 if f in mdata:
1852 1855 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1853 1856 count[0] += 1
1854 1857 self.ui.progress(_('bundling'), count[0],
1855 1858 unit=_('manifests'), total=len(mfs))
1856 1859 return mfs[x]
1857 1860 else:
1858 1861 self.ui.progress(
1859 1862 _('bundling'), count[0], item=fstate[0],
1860 1863 unit=_('files'), total=len(changedfiles))
1861 1864 return fstate[1][x]
1862 1865
1863 1866 bundler = changegroup.bundle10(lookup)
1864 1867 reorder = self.ui.config('bundle', 'reorder', 'auto')
1865 1868 if reorder == 'auto':
1866 1869 reorder = None
1867 1870 else:
1868 1871 reorder = util.parsebool(reorder)
1869 1872
1870 1873 def gengroup():
1871 1874 # Create a changenode group generator that will call our functions
1872 1875 # back to lookup the owning changenode and collect information.
1873 1876 for chunk in cl.group(csets, bundler, reorder=reorder):
1874 1877 yield chunk
1875 1878 self.ui.progress(_('bundling'), None)
1876 1879
1877 1880 # Create a generator for the manifestnodes that calls our lookup
1878 1881 # and data collection functions back.
1879 1882 count[0] = 0
1880 1883 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1881 1884 yield chunk
1882 1885 self.ui.progress(_('bundling'), None)
1883 1886
1884 1887 mfs.clear()
1885 1888
1886 1889 # Go through all our files in order sorted by name.
1887 1890 count[0] = 0
1888 1891 for fname in sorted(changedfiles):
1889 1892 filerevlog = self.file(fname)
1890 1893 if not len(filerevlog):
1891 1894 raise util.Abort(_("empty or missing revlog for %s") % fname)
1892 1895 fstate[0] = fname
1893 1896 fstate[1] = fnodes.pop(fname, {})
1894 1897
1895 1898 nodelist = prune(filerevlog, fstate[1])
1896 1899 if nodelist:
1897 1900 count[0] += 1
1898 1901 yield bundler.fileheader(fname)
1899 1902 for chunk in filerevlog.group(nodelist, bundler, reorder):
1900 1903 yield chunk
1901 1904
1902 1905 # Signal that no more groups are left.
1903 1906 yield bundler.close()
1904 1907 self.ui.progress(_('bundling'), None)
1905 1908
1906 1909 if csets:
1907 1910 self.hook('outgoing', node=hex(csets[0]), source=source)
1908 1911
1909 1912 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1910 1913
1911 1914 def changegroup(self, basenodes, source):
1912 1915 # to avoid a race we use changegroupsubset() (issue1320)
1913 1916 return self.changegroupsubset(basenodes, self.heads(), source)
1914 1917
1915 1918 def _changegroup(self, nodes, source):
1916 1919 """Compute the changegroup of all nodes that we have that a recipient
1917 1920 doesn't. Return a chunkbuffer object whose read() method will return
1918 1921 successive changegroup chunks.
1919 1922
1920 1923 This is much easier than the previous function as we can assume that
1921 1924 the recipient has any changenode we aren't sending them.
1922 1925
1923 1926 nodes is the set of nodes to send"""
1924 1927
1925 1928 cl = self.changelog
1926 1929 mf = self.manifest
1927 1930 mfs = {}
1928 1931 changedfiles = set()
1929 1932 fstate = ['']
1930 1933 count = [0]
1931 1934
1932 1935 self.hook('preoutgoing', throw=True, source=source)
1933 1936 self.changegroupinfo(nodes, source)
1934 1937
1935 1938 revset = set([cl.rev(n) for n in nodes])
1936 1939
1937 1940 def gennodelst(log):
1938 1941 return [log.node(r) for r in log if log.linkrev(r) in revset]
1939 1942
1940 1943 def lookup(revlog, x):
1941 1944 if revlog == cl:
1942 1945 c = cl.read(x)
1943 1946 changedfiles.update(c[3])
1944 1947 mfs.setdefault(c[0], x)
1945 1948 count[0] += 1
1946 1949 self.ui.progress(_('bundling'), count[0],
1947 1950 unit=_('changesets'), total=len(nodes))
1948 1951 return x
1949 1952 elif revlog == mf:
1950 1953 count[0] += 1
1951 1954 self.ui.progress(_('bundling'), count[0],
1952 1955 unit=_('manifests'), total=len(mfs))
1953 1956 return cl.node(revlog.linkrev(revlog.rev(x)))
1954 1957 else:
1955 1958 self.ui.progress(
1956 1959 _('bundling'), count[0], item=fstate[0],
1957 1960 total=len(changedfiles), unit=_('files'))
1958 1961 return cl.node(revlog.linkrev(revlog.rev(x)))
1959 1962
1960 1963 bundler = changegroup.bundle10(lookup)
1961 1964 reorder = self.ui.config('bundle', 'reorder', 'auto')
1962 1965 if reorder == 'auto':
1963 1966 reorder = None
1964 1967 else:
1965 1968 reorder = util.parsebool(reorder)
1966 1969
1967 1970 def gengroup():
1968 1971 '''yield a sequence of changegroup chunks (strings)'''
1969 1972 # construct a list of all changed files
1970 1973
1971 1974 for chunk in cl.group(nodes, bundler, reorder=reorder):
1972 1975 yield chunk
1973 1976 self.ui.progress(_('bundling'), None)
1974 1977
1975 1978 count[0] = 0
1976 1979 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1977 1980 yield chunk
1978 1981 self.ui.progress(_('bundling'), None)
1979 1982
1980 1983 count[0] = 0
1981 1984 for fname in sorted(changedfiles):
1982 1985 filerevlog = self.file(fname)
1983 1986 if not len(filerevlog):
1984 1987 raise util.Abort(_("empty or missing revlog for %s") % fname)
1985 1988 fstate[0] = fname
1986 1989 nodelist = gennodelst(filerevlog)
1987 1990 if nodelist:
1988 1991 count[0] += 1
1989 1992 yield bundler.fileheader(fname)
1990 1993 for chunk in filerevlog.group(nodelist, bundler, reorder):
1991 1994 yield chunk
1992 1995 yield bundler.close()
1993 1996 self.ui.progress(_('bundling'), None)
1994 1997
1995 1998 if nodes:
1996 1999 self.hook('outgoing', node=hex(nodes[0]), source=source)
1997 2000
1998 2001 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1999 2002
2000 2003 def addchangegroup(self, source, srctype, url, emptyok=False):
2001 2004 """Add the changegroup returned by source.read() to this repo.
2002 2005 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2003 2006 the URL of the repo where this changegroup is coming from.
2004 2007
2005 2008 Return an integer summarizing the change to this repo:
2006 2009 - nothing changed or no source: 0
2007 2010 - more heads than before: 1+added heads (2..n)
2008 2011 - fewer heads than before: -1-removed heads (-2..-n)
2009 2012 - number of heads stays the same: 1
2010 2013 """
2011 2014 def csmap(x):
2012 2015 self.ui.debug("add changeset %s\n" % short(x))
2013 2016 return len(cl)
2014 2017
2015 2018 def revmap(x):
2016 2019 return cl.rev(x)
2017 2020
2018 2021 if not source:
2019 2022 return 0
2020 2023
2021 2024 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2022 2025
2023 2026 changesets = files = revisions = 0
2024 2027 efiles = set()
2025 2028
2026 2029 # write changelog data to temp files so concurrent readers will not see
2027 2030 # an inconsistent view
2028 2031 cl = self.changelog
2029 2032 cl.delayupdate()
2030 2033 oldheads = cl.heads()
2031 2034
2032 2035 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2033 2036 try:
2034 2037 trp = weakref.proxy(tr)
2035 2038 # pull off the changeset group
2036 2039 self.ui.status(_("adding changesets\n"))
2037 2040 clstart = len(cl)
2038 2041 class prog(object):
2039 2042 step = _('changesets')
2040 2043 count = 1
2041 2044 ui = self.ui
2042 2045 total = None
2043 2046 def __call__(self):
2044 2047 self.ui.progress(self.step, self.count, unit=_('chunks'),
2045 2048 total=self.total)
2046 2049 self.count += 1
2047 2050 pr = prog()
2048 2051 source.callback = pr
2049 2052
2050 2053 source.changelogheader()
2051 2054 srccontent = cl.addgroup(source, csmap, trp)
2052 2055 if not (srccontent or emptyok):
2053 2056 raise util.Abort(_("received changelog group is empty"))
2054 2057 clend = len(cl)
2055 2058 changesets = clend - clstart
2056 2059 for c in xrange(clstart, clend):
2057 2060 efiles.update(self[c].files())
2058 2061 efiles = len(efiles)
2059 2062 self.ui.progress(_('changesets'), None)
2060 2063
2061 2064 # pull off the manifest group
2062 2065 self.ui.status(_("adding manifests\n"))
2063 2066 pr.step = _('manifests')
2064 2067 pr.count = 1
2065 2068 pr.total = changesets # manifests <= changesets
2066 2069 # no need to check for empty manifest group here:
2067 2070 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2068 2071 # no new manifest will be created and the manifest group will
2069 2072 # be empty during the pull
2070 2073 source.manifestheader()
2071 2074 self.manifest.addgroup(source, revmap, trp)
2072 2075 self.ui.progress(_('manifests'), None)
2073 2076
2074 2077 needfiles = {}
2075 2078 if self.ui.configbool('server', 'validate', default=False):
2076 2079 # validate incoming csets have their manifests
2077 2080 for cset in xrange(clstart, clend):
2078 2081 mfest = self.changelog.read(self.changelog.node(cset))[0]
2079 2082 mfest = self.manifest.readdelta(mfest)
2080 2083 # store file nodes we must see
2081 2084 for f, n in mfest.iteritems():
2082 2085 needfiles.setdefault(f, set()).add(n)
2083 2086
2084 2087 # process the files
2085 2088 self.ui.status(_("adding file changes\n"))
2086 2089 pr.step = _('files')
2087 2090 pr.count = 1
2088 2091 pr.total = efiles
2089 2092 source.callback = None
2090 2093
2091 2094 while True:
2092 2095 chunkdata = source.filelogheader()
2093 2096 if not chunkdata:
2094 2097 break
2095 2098 f = chunkdata["filename"]
2096 2099 self.ui.debug("adding %s revisions\n" % f)
2097 2100 pr()
2098 2101 fl = self.file(f)
2099 2102 o = len(fl)
2100 2103 if not fl.addgroup(source, revmap, trp):
2101 2104 raise util.Abort(_("received file revlog group is empty"))
2102 2105 revisions += len(fl) - o
2103 2106 files += 1
2104 2107 if f in needfiles:
2105 2108 needs = needfiles[f]
2106 2109 for new in xrange(o, len(fl)):
2107 2110 n = fl.node(new)
2108 2111 if n in needs:
2109 2112 needs.remove(n)
2110 2113 if not needs:
2111 2114 del needfiles[f]
2112 2115 self.ui.progress(_('files'), None)
2113 2116
2114 2117 for f, needs in needfiles.iteritems():
2115 2118 fl = self.file(f)
2116 2119 for n in needs:
2117 2120 try:
2118 2121 fl.rev(n)
2119 2122 except error.LookupError:
2120 2123 raise util.Abort(
2121 2124 _('missing file data for %s:%s - run hg verify') %
2122 2125 (f, hex(n)))
2123 2126
2124 2127 dh = 0
2125 2128 if oldheads:
2126 2129 heads = cl.heads()
2127 2130 dh = len(heads) - len(oldheads)
2128 2131 for h in heads:
2129 2132 if h not in oldheads and 'close' in self[h].extra():
2130 2133 dh -= 1
2131 2134 htext = ""
2132 2135 if dh:
2133 2136 htext = _(" (%+d heads)") % dh
2134 2137
2135 2138 self.ui.status(_("added %d changesets"
2136 2139 " with %d changes to %d files%s\n")
2137 2140 % (changesets, revisions, files, htext))
2138 2141
2139 2142 if changesets > 0:
2140 2143 p = lambda: cl.writepending() and self.root or ""
2141 2144 self.hook('pretxnchangegroup', throw=True,
2142 2145 node=hex(cl.node(clstart)), source=srctype,
2143 2146 url=url, pending=p)
2144 2147
2145 2148 added = [cl.node(r) for r in xrange(clstart, clend)]
2146 2149 publishing = self.ui.configbool('phases', 'publish', True)
2147 2150 if srctype == 'push':
2148 2151 # Old servers cannot push the boundary themselves.
2149 2152 # New servers won't push the boundary if the changeset already
2150 2153 # existed locally as secret
2151 2154 #
2152 2155 # We should not use 'added' here but the list of all changes in
2153 2156 # the bundle
2154 2157 if publishing:
2155 2158 phases.advanceboundary(self, phases.public, srccontent)
2156 2159 else:
2157 2160 phases.advanceboundary(self, phases.draft, srccontent)
2158 2161 phases.retractboundary(self, phases.draft, added)
2159 2162 elif srctype != 'strip':
2160 2163 # publishing only alters behavior during push
2161 2164 #
2162 2165 # strip should not touch boundary at all
2163 2166 phases.retractboundary(self, phases.draft, added)
2164 2167
2165 2168 # make changelog see real files again
2166 2169 cl.finalize(trp)
2167 2170
2168 2171 tr.close()
2169 2172
2170 2173 if changesets > 0:
2171 2174 def runhooks():
2172 2175 # forcefully update the on-disk branch cache
2173 2176 self.ui.debug("updating the branch cache\n")
2174 2177 self.updatebranchcache()
2175 2178 self.hook("changegroup", node=hex(cl.node(clstart)),
2176 2179 source=srctype, url=url)
2177 2180
2178 2181 for n in added:
2179 2182 self.hook("incoming", node=hex(n), source=srctype,
2180 2183 url=url)
2181 2184 self._afterlock(runhooks)
2182 2185
2183 2186 finally:
2184 2187 tr.release()
2185 2188 # never return 0 here:
2186 2189 if dh < 0:
2187 2190 return dh - 1
2188 2191 else:
2189 2192 return dh + 1
2190 2193
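    # Editor's note (restating the contract above): on success the return
    # value is never 0, and its sign encodes the head-count change:
    #
    #     ret = repo.addchangegroup(cg, 'pull', remote.url())
    #     if ret > 1:
    #         pass   # ret - 1 new heads appeared
    #     elif ret < 0:
    #         pass   # -(ret + 1) heads were removed
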
2191 2194 def stream_in(self, remote, requirements):
2192 2195 lock = self.lock()
2193 2196 try:
2194 2197 fp = remote.stream_out()
2195 2198 l = fp.readline()
2196 2199 try:
2197 2200 resp = int(l)
2198 2201 except ValueError:
2199 2202 raise error.ResponseError(
2200 2203 _('Unexpected response from remote server:'), l)
2201 2204 if resp == 1:
2202 2205 raise util.Abort(_('operation forbidden by server'))
2203 2206 elif resp == 2:
2204 2207 raise util.Abort(_('locking the remote repository failed'))
2205 2208 elif resp != 0:
2206 2209 raise util.Abort(_('the server sent an unknown error code'))
2207 2210 self.ui.status(_('streaming all changes\n'))
2208 2211 l = fp.readline()
2209 2212 try:
2210 2213 total_files, total_bytes = map(int, l.split(' ', 1))
2211 2214 except (ValueError, TypeError):
2212 2215 raise error.ResponseError(
2213 2216 _('Unexpected response from remote server:'), l)
2214 2217 self.ui.status(_('%d files to transfer, %s of data\n') %
2215 2218 (total_files, util.bytecount(total_bytes)))
2216 2219 start = time.time()
2217 2220 for i in xrange(total_files):
2218 2221 # XXX doesn't support '\n' or '\r' in filenames
2219 2222 l = fp.readline()
2220 2223 try:
2221 2224 name, size = l.split('\0', 1)
2222 2225 size = int(size)
2223 2226 except (ValueError, TypeError):
2224 2227 raise error.ResponseError(
2225 2228 _('Unexpected response from remote server:'), l)
2226 2229 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2227 2230 # for backwards compat, name was partially encoded
2228 2231 ofp = self.sopener(store.decodedir(name), 'w')
2229 2232 for chunk in util.filechunkiter(fp, limit=size):
2230 2233 ofp.write(chunk)
2231 2234 ofp.close()
2232 2235 elapsed = time.time() - start
2233 2236 if elapsed <= 0:
2234 2237 elapsed = 0.001
2235 2238 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2236 2239 (util.bytecount(total_bytes), elapsed,
2237 2240 util.bytecount(total_bytes / elapsed)))
2238 2241
2239 2242 # new requirements = old non-format requirements + new format-related
2240 2243 # requirements from the streamed-in repository
2241 2244 requirements.update(set(self.requirements) - self.supportedformats)
2242 2245 self._applyrequirements(requirements)
2243 2246 self._writerequirements()
2244 2247
2245 2248 self.invalidate()
2246 2249 return len(self.heads()) + 1
2247 2250 finally:
2248 2251 lock.release()
2249 2252
2250 2253 def clone(self, remote, heads=[], stream=False):
2251 2254 '''clone remote repository.
2252 2255
2253 2256 keyword arguments:
2254 2257 heads: list of revs to clone (forces use of pull)
2255 2258 stream: use streaming clone if possible'''
2256 2259
2257 2260 # now, all clients that can request uncompressed clones can
2258 2261 # read repo formats supported by all servers that can serve
2259 2262 # them.
2260 2263
2261 2264 # if revlog format changes, client will have to check version
2262 2265 # and format flags on "stream" capability, and use
2263 2266 # uncompressed only if compatible.
2264 2267
2265 2268 if stream and not heads:
2266 2269 # 'stream' means remote revlog format is revlogv1 only
2267 2270 if remote.capable('stream'):
2268 2271 return self.stream_in(remote, set(('revlogv1',)))
2269 2272 # otherwise, 'streamreqs' contains the remote revlog format
2270 2273 streamreqs = remote.capable('streamreqs')
2271 2274 if streamreqs:
2272 2275 streamreqs = set(streamreqs.split(','))
2273 2276 # if we support it, stream in and adjust our requirements
2274 2277 if not streamreqs - self.supportedformats:
2275 2278 return self.stream_in(remote, streamreqs)
2276 2279 return self.pull(remote, heads)
2277 2280
2278 2281 def pushkey(self, namespace, key, old, new):
2279 2282 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2280 2283 old=old, new=new)
2281 2284 ret = pushkey.push(self, namespace, key, old, new)
2282 2285 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2283 2286 ret=ret)
2284 2287 return ret
2285 2288
2286 2289 def listkeys(self, namespace):
2287 2290 self.hook('prelistkeys', throw=True, namespace=namespace)
2288 2291 values = pushkey.list(self, namespace)
2289 2292 self.hook('listkeys', namespace=namespace, values=values)
2290 2293 return values
2291 2294
2292 2295 def debugwireargs(self, one, two, three=None, four=None, five=None):
2293 2296 '''used to test argument passing over the wire'''
2294 2297 return "%s %s %s %s %s" % (one, two, three, four, five)
2295 2298
2296 2299 def savecommitmessage(self, text):
2297 2300 fp = self.opener('last-message.txt', 'wb')
2298 2301 try:
2299 2302 fp.write(text)
2300 2303 finally:
2301 2304 fp.close()
2302 2305 return self.pathto(fp.name[len(self.root)+1:])
2303 2306
2304 2307 # used to avoid circular references so destructors work
2305 2308 def aftertrans(files):
2306 2309 renamefiles = [tuple(t) for t in files]
2307 2310 def a():
2308 2311 for src, dest in renamefiles:
2309 2312 util.rename(src, dest)
2310 2313 return a
2311 2314
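# Editor's example (hypothetical file names): a transaction can be handed
# aftertrans() output as its post-close callback so journal files become
# the undo backup once the transaction commits:
#
#     onclose = aftertrans([('.hg/journal', '.hg/undo')])
#     onclose()   # performs the renames via util.rename
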
2312 2315 def undoname(fn):
2313 2316 base, name = os.path.split(fn)
2314 2317 assert name.startswith('journal')
2315 2318 return os.path.join(base, name.replace('journal', 'undo', 1))
2316 2319
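# Editor's example:
#
#     undoname('store/journal')      # -> 'store/undo'
#     undoname('journal.dirstate')   # -> 'undo.dirstate'
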
2317 2320 def instance(ui, path, create):
2318 2321 return localrepository(ui, util.urllocalpath(path), create)
2319 2322
2320 2323 def islocal(path):
2321 2324 return True