improve walk docstrings
Matt Mackall
r3532:26b556c1 default
dirstate.py
@@ -1,529 +1,531 @@
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "struct os time bisect stat strutil util re errno")
14 14
15 15 class dirstate(object):
16 16 format = ">cllll"
17 17
18 18 def __init__(self, opener, ui, root):
19 19 self.opener = opener
20 20 self.root = root
21 21 self.dirty = 0
22 22 self.ui = ui
23 23 self.map = None
24 24 self.pl = None
25 25 self.dirs = None
26 26 self.copymap = {}
27 27 self.ignorefunc = None
28 28 self.blockignore = False
29 29
30 30 def wjoin(self, f):
31 31 return os.path.join(self.root, f)
32 32
33 33 def getcwd(self):
34 34 cwd = os.getcwd()
35 35 if cwd == self.root: return ''
36 36 return cwd[len(self.root) + 1:]
37 37
38 38 def hgignore(self):
39 39 '''return the contents of .hgignore files as a list of patterns.
40 40
41 41 the files parsed for patterns include:
42 42 .hgignore in the repository root
43 43 any additional files specified in the [ui] section of ~/.hgrc
44 44
45 45 trailing white space is dropped.
46 46 the escape character is backslash.
47 47 comments start with #.
48 48 empty lines are skipped.
49 49
50 50 lines can be of the following formats:
51 51
52 52 syntax: regexp # defaults following lines to non-rooted regexps
53 53 syntax: glob # defaults following lines to non-rooted globs
54 54 re:pattern # non-rooted regular expression
55 55 glob:pattern # non-rooted glob
56 56 pattern # pattern of the current default type'''
57 57 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
58 58 def parselines(fp):
59 59 for line in fp:
60 60 escape = False
61 61 for i in xrange(len(line)):
62 62 if escape: escape = False
63 63 elif line[i] == '\\': escape = True
64 64 elif line[i] == '#': break
65 65 line = line[:i].rstrip()
66 66 if line: yield line
67 67 repoignore = self.wjoin('.hgignore')
68 68 files = [repoignore]
69 69 files.extend(self.ui.hgignorefiles())
70 70 pats = {}
71 71 for f in files:
72 72 try:
73 73 pats[f] = []
74 74 fp = open(f)
75 75 syntax = 'relre:'
76 76 for line in parselines(fp):
77 77 if line.startswith('syntax:'):
78 78 s = line[7:].strip()
79 79 try:
80 80 syntax = syntaxes[s]
81 81 except KeyError:
82 82 self.ui.warn(_("%s: ignoring invalid "
83 83 "syntax '%s'\n") % (f, s))
84 84 continue
85 85 pat = syntax + line
86 86 for s in syntaxes.values():
87 87 if line.startswith(s):
88 88 pat = line
89 89 break
90 90 pats[f].append(pat)
91 91 except IOError, inst:
92 92 if f != repoignore:
93 93 self.ui.warn(_("skipping unreadable ignore file"
94 94 " '%s': %s\n") % (f, inst.strerror))
95 95 return pats
96 96
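An aside for readers of hgignore(): the 'syntax:' handling reduces to a tiny state machine that prefixes each pattern with the current default. A minimal, self-contained sketch (not Mercurial's own code; it omits the override for lines that already carry an explicit prefix, and the warning on invalid syntax names):

    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}

    def tag_patterns(lines):
        syntax = 'relre:'  # same default as hgignore() above
        for line in lines:
            if line.startswith('syntax:'):
                syntax = syntaxes.get(line[7:].strip(), syntax)
                continue
            yield syntax + line

    assert list(tag_patterns(['core', 'syntax: glob', '*.pyc'])) == \
           ['relre:core', 'relglob:*.pyc']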
97 97 def ignore(self, fn):
98 98 '''default match function used by dirstate and
99 99 localrepository. this honours the repository .hgignore file
100 100 and any other files specified in the [ui] section of .hgrc.'''
101 101 if self.blockignore:
102 102 return False
103 103 if not self.ignorefunc:
104 104 ignore = self.hgignore()
105 105 allpats = []
106 106 [allpats.extend(patlist) for patlist in ignore.values()]
107 107 if allpats:
108 108 try:
109 109 files, self.ignorefunc, anypats = (
110 110 util.matcher(self.root, inc=allpats, src='.hgignore'))
111 111 except util.Abort:
112 112 # Re-raise an exception where the src is the right file
113 113 for f, patlist in ignore.items():
114 114 files, self.ignorefunc, anypats = (
115 115 util.matcher(self.root, inc=patlist, src=f))
116 116 else:
117 117 self.ignorefunc = util.never
118 118 return self.ignorefunc(fn)
119 119
120 120 def __del__(self):
121 121 if self.dirty:
122 122 self.write()
123 123
124 124 def __getitem__(self, key):
125 125 try:
126 126 return self.map[key]
127 127 except TypeError:
128 128 self.lazyread()
129 129 return self[key]
130 130
131 131 def __contains__(self, key):
132 132 self.lazyread()
133 133 return key in self.map
134 134
135 135 def parents(self):
136 136 self.lazyread()
137 137 return self.pl
138 138
139 139 def markdirty(self):
140 140 if not self.dirty:
141 141 self.dirty = 1
142 142
143 143 def setparents(self, p1, p2=nullid):
144 144 self.lazyread()
145 145 self.markdirty()
146 146 self.pl = p1, p2
147 147
148 148 def state(self, key):
149 149 try:
150 150 return self[key][0]
151 151 except KeyError:
152 152 return "?"
153 153
154 154 def lazyread(self):
155 155 if self.map is None:
156 156 self.read()
157 157
158 158 def parse(self, st):
159 159 self.pl = [st[:20], st[20:40]]
160 160
161 161 # deref fields so they will be local in loop
162 162 map = self.map
163 163 copymap = self.copymap
164 164 format = self.format
165 165 unpack = struct.unpack
166 166
167 167 pos = 40
168 168 e_size = struct.calcsize(format)
169 169
170 170 while pos < len(st):
171 171 newpos = pos + e_size
172 172 e = unpack(format, st[pos:newpos])
173 173 l = e[4]
174 174 pos = newpos
175 175 newpos = pos + l
176 176 f = st[pos:newpos]
177 177 if '\0' in f:
178 178 f, c = f.split('\0')
179 179 copymap[f] = c
180 180 map[f] = e[:4]
181 181 pos = newpos
182 182
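For concreteness, a sketch of one on-disk entry in the ">cllll" layout that parse() decodes and write() encodes: state byte, mode, size, mtime, and the length of the (possibly '\0'-joined copy) filename, all big-endian, following the two 20-byte parent hashes. All values below are invented for illustration:

    import struct

    fmt = ">cllll"
    name = b"foo/bar"
    entry = struct.pack(fmt, b"n", 0o644, 1024, 1136000000, len(name)) + name

    e_size = struct.calcsize(fmt)
    state, mode, size, mtime, flen = struct.unpack(fmt, entry[:e_size])
    assert (state, entry[e_size:e_size + flen]) == (b"n", b"foo/bar")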
183 183 def read(self):
184 184 self.map = {}
185 185 self.pl = [nullid, nullid]
186 186 try:
187 187 st = self.opener("dirstate").read()
188 188 if st:
189 189 self.parse(st)
190 190 except IOError, err:
191 191 if err.errno != errno.ENOENT: raise
192 192
193 193 def copy(self, source, dest):
194 194 self.lazyread()
195 195 self.markdirty()
196 196 self.copymap[dest] = source
197 197
198 198 def copied(self, file):
199 199 return self.copymap.get(file, None)
200 200
201 201 def copies(self):
202 202 return self.copymap
203 203
204 204 def initdirs(self):
205 205 if self.dirs is None:
206 206 self.dirs = {}
207 207 for f in self.map:
208 208 self.updatedirs(f, 1)
209 209
210 210 def updatedirs(self, path, delta):
211 211 if self.dirs is not None:
212 212 for c in strutil.findall(path, '/'):
213 213 pc = path[:c]
214 214 self.dirs.setdefault(pc, 0)
215 215 self.dirs[pc] += delta
216 216
217 217 def checkshadows(self, files):
218 218 def prefixes(f):
219 219 for c in strutil.rfindall(f, '/'):
220 220 yield f[:c]
221 221 self.lazyread()
222 222 self.initdirs()
223 223 seendirs = {}
224 224 for f in files:
225 225 if self.dirs.get(f):
226 226 raise util.Abort(_('directory named %r already in dirstate') %
227 227 f)
228 228 for d in prefixes(f):
229 229 if d in seendirs:
230 230 break
231 231 if d in self.map:
232 232 raise util.Abort(_('file named %r already in dirstate') %
233 233 d)
234 234 seendirs[d] = True
235 235
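The dirs map maintained by initdirs()/updatedirs() is just a reference count over every '/'-separated directory prefix of the tracked paths, which is what lets checkshadows() reject name collisions cheaply. A toy sketch of the counting:

    def prefixes(path):
        parts = path.split('/')
        for i in range(1, len(parts)):
            yield '/'.join(parts[:i])

    dirs = {}
    for f in ['a/b/c', 'a/d']:
        for p in prefixes(f):
            dirs[p] = dirs.get(p, 0) + 1

    assert dirs == {'a': 2, 'a/b': 1}
    # 'a' is now a directory name, so a *file* named 'a' must be rejected.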
236 236 def update(self, files, state, **kw):
237 237 ''' current states:
238 238 n normal
239 239 m needs merging
240 240 r marked for removal
241 241 a marked for addition'''
242 242
243 243 if not files: return
244 244 self.lazyread()
245 245 self.markdirty()
246 246 if state == "a":
247 247 self.initdirs()
248 248 self.checkshadows(files)
249 249 for f in files:
250 250 if state == "r":
251 251 self.map[f] = ('r', 0, 0, 0)
252 252 self.updatedirs(f, -1)
253 253 else:
254 254 if state == "a":
255 255 self.updatedirs(f, 1)
256 256 s = os.lstat(self.wjoin(f))
257 257 st_size = kw.get('st_size', s.st_size)
258 258 st_mtime = kw.get('st_mtime', s.st_mtime)
259 259 self.map[f] = (state, s.st_mode, st_size, st_mtime)
260 260 if self.copymap.has_key(f):
261 261 del self.copymap[f]
262 262
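Illustrative contents of self.map after a few update() calls, using the states documented above (mode/size/mtime values are invented; removed entries really are stored as ('r', 0, 0, 0), per the code above):

    example_map = {
        'kept.c':   ('n', 0o100644, 120, 1136000000),  # normal
        'merged.c': ('m', 0o100644, 340, 1136000000),  # needs merging
        'gone.c':   ('r', 0, 0, 0),                    # marked for removal
        'new.c':    ('a', 0o100644,  80, 1136000000),  # marked for addition
    }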
263 263 def forget(self, files):
264 264 if not files: return
265 265 self.lazyread()
266 266 self.markdirty()
267 267 self.initdirs()
268 268 for f in files:
269 269 try:
270 270 del self.map[f]
271 271 self.updatedirs(f, -1)
272 272 except KeyError:
273 273 self.ui.warn(_("not in dirstate: %s!\n") % f)
274 274 pass
275 275
276 276 def clear(self):
277 277 self.map = {}
278 278 self.copymap = {}
279 279 self.dirs = None
280 280 self.markdirty()
281 281
282 282 def rebuild(self, parent, files):
283 283 self.clear()
284 284 umask = os.umask(0)
285 285 os.umask(umask)
286 286 for f in files:
287 287 if files.execf(f):
288 288 self.map[f] = ('n', ~umask, -1, 0)
289 289 else:
290 290 self.map[f] = ('n', ~umask & 0666, -1, 0)
291 291 self.pl = (parent, nullid)
292 292 self.markdirty()
293 293
294 294 def write(self):
295 295 if not self.dirty:
296 296 return
297 297 st = self.opener("dirstate", "w", atomic=True)
298 298 st.write("".join(self.pl))
299 299 for f, e in self.map.items():
300 300 c = self.copied(f)
301 301 if c:
302 302 f = f + "\0" + c
303 303 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
304 304 st.write(e + f)
305 305 self.dirty = 0
306 306
307 307 def filterfiles(self, files):
308 308 ret = {}
309 309 unknown = []
310 310
311 311 for x in files:
312 312 if x == '.':
313 313 return self.map.copy()
314 314 if x not in self.map:
315 315 unknown.append(x)
316 316 else:
317 317 ret[x] = self.map[x]
318 318
319 319 if not unknown:
320 320 return ret
321 321
322 322 b = self.map.keys()
323 323 b.sort()
324 324 blen = len(b)
325 325
326 326 for x in unknown:
327 327 bs = bisect.bisect(b, "%s%s" % (x, '/'))
328 328 while bs < blen:
329 329 s = b[bs]
330 330 if len(s) > len(x) and s.startswith(x):
331 331 ret[s] = self.map[s]
332 332 else:
333 333 break
334 334 bs += 1
335 335 return ret
336 336
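The bisect step in filterfiles() deserves a note: to expand a directory argument 'x' into the tracked files under it, it bisects the sorted key list at 'x/' and scans forward. A self-contained sketch of the trick (using an explicit 'x/' prefix test):

    import bisect

    keys = sorted(['a/1', 'a/2', 'ab', 'b/1'])
    x = 'a'
    i = bisect.bisect(keys, x + '/')
    under = []
    while i < len(keys) and keys[i].startswith(x + '/'):
        under.append(keys[i])
        i += 1
    assert under == ['a/1', 'a/2']   # 'ab' is not under 'a/'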
337 337 def supported_type(self, f, st, verbose=False):
338 338 if stat.S_ISREG(st.st_mode):
339 339 return True
340 340 if verbose:
341 341 kind = 'unknown'
342 342 if stat.S_ISCHR(st.st_mode): kind = _('character device')
343 343 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
344 344 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
345 345 elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
346 346 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
347 347 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
348 348 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
349 349 util.pathto(self.getcwd(), f),
350 350 kind))
351 351 return False
352 352
353 353 def walk(self, files=None, match=util.always, badmatch=None):
354 354 # filter out the stat
355 355 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
356 356 yield src, f
357 357
358 358 def statwalk(self, files=None, match=util.always, ignored=False,
359 359 badmatch=None):
360 360 '''
361 361 walk recursively through the directory tree, finding all files
362 362 matched by the match function
363 363
364 364 results are yielded in a tuple (src, filename, st), where src
365 365 is one of:
366 366 'f' the file was found in the directory tree
367 367 'm' the file was only in the dirstate and not in the tree
368 'b' file was not found and matched badmatch
369
368 370 and st is the stat result if the file was found in the directory.
369 371 '''
370 372 self.lazyread()
371 373
372 374 # walk all files by default
373 375 if not files:
374 376 files = [self.root]
375 377 dc = self.map.copy()
376 378 else:
377 379 dc = self.filterfiles(files)
378 380
379 381 def imatch(file_):
380 382 file_ = util.pconvert(file_)
381 383 if not ignored and file_ not in dc and self.ignore(file_):
382 384 return False
383 385 return match(file_)
384 386
385 387 # self.root may end with a path separator when self.root == '/'
386 388 common_prefix_len = len(self.root)
387 389 if not self.root.endswith('/'):
388 390 common_prefix_len += 1
389 391 # recursion free walker, faster than os.walk.
390 392 def findfiles(s):
391 393 work = [s]
392 394 while work:
393 395 top = work.pop()
394 396 names = os.listdir(top)
395 397 names.sort()
396 398 # nd is the top of the repository dir tree
397 399 nd = util.normpath(top[common_prefix_len:])
398 400 if nd == '.':
399 401 nd = ''
400 402 else:
401 403 # do not recurse into a repo contained in this
402 404 # one. use bisect to find .hg directory so speed
403 405 # is good on big directory.
404 406 hg = bisect.bisect_left(names, '.hg')
405 407 if hg < len(names) and names[hg] == '.hg':
406 408 if os.path.isdir(os.path.join(top, '.hg')):
407 409 continue
408 410 for f in names:
409 411 np = util.pconvert(os.path.join(nd, f))
410 412 if seen(np):
411 413 continue
412 414 p = os.path.join(top, f)
413 415 # don't trip over symlinks
414 416 st = os.lstat(p)
415 417 if stat.S_ISDIR(st.st_mode):
416 418 ds = os.path.join(nd, f + '/')
417 419 if imatch(ds):
418 420 work.append(p)
419 421 if imatch(np) and np in dc:
420 422 yield 'm', np, st
421 423 elif imatch(np):
422 424 if self.supported_type(np, st):
423 425 yield 'f', np, st
424 426 elif np in dc:
425 427 yield 'm', np, st
426 428
427 429 known = {'.hg': 1}
428 430 def seen(fn):
429 431 if fn in known: return True
430 432 known[fn] = 1
431 433
432 434 # step one, find all files that match our criteria
433 435 files.sort()
434 436 for ff in util.unique(files):
435 437 f = self.wjoin(ff)
436 438 try:
437 439 st = os.lstat(f)
438 440 except OSError, inst:
439 441 nf = util.normpath(ff)
440 442 found = False
441 443 for fn in dc:
442 444 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
443 445 found = True
444 446 break
445 447 if not found:
446 448 if inst.errno != errno.ENOENT or not badmatch:
447 449 self.ui.warn('%s: %s\n' % (
448 450 util.pathto(self.getcwd(), ff),
449 451 inst.strerror))
450 452 elif badmatch and badmatch(ff) and imatch(ff):
451 453 yield 'b', ff, None
452 454 continue
453 455 if stat.S_ISDIR(st.st_mode):
454 456 cmp1 = (lambda x, y: cmp(x[1], y[1]))
455 457 sorted_ = [ x for x in findfiles(f) ]
456 458 sorted_.sort(cmp1)
457 459 for e in sorted_:
458 460 yield e
459 461 else:
460 462 ff = util.normpath(ff)
461 463 if seen(ff):
462 464 continue
463 465 self.blockignore = True
464 466 if imatch(ff):
465 467 if self.supported_type(ff, st, verbose=True):
466 468 yield 'f', ff, st
467 469 elif ff in dc:
468 470 yield 'm', ff, st
469 471 self.blockignore = False
470 472
471 473 # step two run through anything left in the dc hash and yield
472 474 # if we haven't already seen it
473 475 ks = dc.keys()
474 476 ks.sort()
475 477 for k in ks:
476 478 if not seen(k) and imatch(k):
477 479 yield 'm', k, None
478 480
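To make the improved docstring concrete, here is a toy model of the yield protocol: statwalk() produces (src, filename, st) triples with src one of 'f', 'm', 'b', and walk() above simply drops the stat:

    def fake_statwalk():
        yield 'f', 'seen/on/disk.py', object()  # found in the tree
        yield 'm', 'only/in/dirstate.py', None  # missing from the tree
        yield 'b', 'no/such/file.py', None      # matched badmatch

    def fake_walk():
        for src, f, st in fake_statwalk():
            yield src, f

    assert [s for s, f in fake_walk()] == ['f', 'm', 'b']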
479 481 def status(self, files=None, match=util.always, list_ignored=False,
480 482 list_clean=False):
481 483 lookup, modified, added, unknown, ignored = [], [], [], [], []
482 484 removed, deleted, clean = [], [], []
483 485
484 486 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
485 487 try:
486 488 type_, mode, size, time = self[fn]
487 489 except KeyError:
488 490 if list_ignored and self.ignore(fn):
489 491 ignored.append(fn)
490 492 else:
491 493 unknown.append(fn)
492 494 continue
493 495 if src == 'm':
494 496 nonexistent = True
495 497 if not st:
496 498 try:
497 499 st = os.lstat(self.wjoin(fn))
498 500 except OSError, inst:
499 501 if inst.errno != errno.ENOENT:
500 502 raise
501 503 st = None
502 504 # We need to re-check that it is a valid file
503 505 if st and self.supported_type(fn, st):
504 506 nonexistent = False
505 507 # XXX: what to do with file no longer present in the fs
506 508 # who are not removed in the dirstate ?
507 509 if nonexistent and type_ in "nm":
508 510 deleted.append(fn)
509 511 continue
510 512 # check the common case first
511 513 if type_ == 'n':
512 514 if not st:
513 515 st = os.lstat(self.wjoin(fn))
514 516 if size >= 0 and (size != st.st_size
515 517 or (mode ^ st.st_mode) & 0100):
516 518 modified.append(fn)
517 519 elif time != int(st.st_mtime):
518 520 lookup.append(fn)
519 521 elif list_clean:
520 522 clean.append(fn)
521 523 elif type_ == 'm':
522 524 modified.append(fn)
523 525 elif type_ == 'a':
524 526 added.append(fn)
525 527 elif type_ == 'r':
526 528 removed.append(fn)
527 529
528 530 return (lookup, modified, added, removed, deleted, unknown, ignored,
529 531 clean)
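The heart of status() is the "common case" classification of 'n' entries: a size or exec-bit difference is a definite modification, while a bare mtime difference only earns a 'lookup' (content comparison later). A sketch of that decision, with 0o100 being the owner-exec bit tested above:

    def classify(entry, st_size, st_mode, st_mtime):
        state, mode, size, mtime = entry
        if size >= 0 and (size != st_size or (mode ^ st_mode) & 0o100):
            return 'modified'
        if mtime != int(st_mtime):
            return 'lookup'
        return 'clean'

    assert classify(('n', 0o644, 10, 100), 10, 0o644, 100) == 'clean'
    assert classify(('n', 0o644, 10, 100), 11, 0o644, 100) == 'modified'
    assert classify(('n', 0o644, 10, 100), 10, 0o744, 100) == 'modified'
    assert classify(('n', 0o644, 10, 100), 10, 0o644, 101) == 'lookup'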
localrepo.py
@@ -1,1841 +1,1853 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.sopener = util.opener(self.path)
51 51 self.wopener = util.opener(self.root)
52 52
53 53 try:
54 54 self.ui.readconfig(self.join("hgrc"), self.root)
55 55 except IOError:
56 56 pass
57 57
58 58 v = self.ui.configrevlog()
59 59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 61 fl = v.get('flags', None)
62 62 flags = 0
63 63 if fl != None:
64 64 for x in fl.split():
65 65 flags |= revlog.flagstr(x)
66 66 elif self.revlogv1:
67 67 flags = revlog.REVLOG_DEFAULT_FLAGS
68 68
69 69 v = self.revlogversion | flags
70 70 self.manifest = manifest.manifest(self.sopener, v)
71 71 self.changelog = changelog.changelog(self.sopener, v)
72 72
73 73 # the changelog might not have the inline index flag
74 74 # on. If the format of the changelog is the same as found in
75 75 # .hgrc, apply any flags found in the .hgrc as well.
76 76 # Otherwise, just version from the changelog
77 77 v = self.changelog.version
78 78 if v == self.revlogversion:
79 79 v |= flags
80 80 self.revlogversion = v
81 81
82 82 self.tagscache = None
83 83 self.branchcache = None
84 84 self.nodetagscache = None
85 85 self.encodepats = None
86 86 self.decodepats = None
87 87 self.transhandle = None
88 88
89 89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90 90
91 91 def url(self):
92 92 return 'file:' + self.root
93 93
94 94 def hook(self, name, throw=False, **args):
95 95 def callhook(hname, funcname):
96 96 '''call python hook. hook is callable object, looked up as
97 97 name in python module. if callable returns "true", hook
98 98 fails, else passes. if hook raises exception, treated as
99 99 hook failure. exception propagates if throw is "true".
100 100
101 101 reason for "true" meaning "hook failed" is so that
102 102 unmodified commands (e.g. mercurial.commands.update) can
103 103 be run as hooks without wrappers to convert return values.'''
104 104
105 105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 106 d = funcname.rfind('.')
107 107 if d == -1:
108 108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 109 % (hname, funcname))
110 110 modname = funcname[:d]
111 111 try:
112 112 obj = __import__(modname)
113 113 except ImportError:
114 114 try:
115 115 # extensions are loaded with hgext_ prefix
116 116 obj = __import__("hgext_%s" % modname)
117 117 except ImportError:
118 118 raise util.Abort(_('%s hook is invalid '
119 119 '(import of "%s" failed)') %
120 120 (hname, modname))
121 121 try:
122 122 for p in funcname.split('.')[1:]:
123 123 obj = getattr(obj, p)
124 124 except AttributeError, err:
125 125 raise util.Abort(_('%s hook is invalid '
126 126 '("%s" is not defined)') %
127 127 (hname, funcname))
128 128 if not callable(obj):
129 129 raise util.Abort(_('%s hook is invalid '
130 130 '("%s" is not callable)') %
131 131 (hname, funcname))
132 132 try:
133 133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 134 except (KeyboardInterrupt, util.SignalInterrupt):
135 135 raise
136 136 except Exception, exc:
137 137 if isinstance(exc, util.Abort):
138 138 self.ui.warn(_('error: %s hook failed: %s\n') %
139 139 (hname, exc.args[0]))
140 140 else:
141 141 self.ui.warn(_('error: %s hook raised an exception: '
142 142 '%s\n') % (hname, exc))
143 143 if throw:
144 144 raise
145 145 self.ui.print_exc()
146 146 return True
147 147 if r:
148 148 if throw:
149 149 raise util.Abort(_('%s hook failed') % hname)
150 150 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 151 return r
152 152
153 153 def runhook(name, cmd):
154 154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 156 r = util.system(cmd, environ=env, cwd=self.root)
157 157 if r:
158 158 desc, r = util.explain_exit(r)
159 159 if throw:
160 160 raise util.Abort(_('%s hook %s') % (name, desc))
161 161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 162 return r
163 163
164 164 r = False
165 165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 166 if hname.split(".", 1)[0] == name and cmd]
167 167 hooks.sort()
168 168 for hname, cmd in hooks:
169 169 if cmd.startswith('python:'):
170 170 r = callhook(hname, cmd[7:].strip()) or r
171 171 else:
172 172 r = runhook(hname, cmd) or r
173 173 return r
174 174
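Hook selection above is worth a sketch: every [hooks] item whose name is the event or 'event.suffix' fires, in sorted order, with 'python:' values routed to callhook() and everything else to runhook(). Names and commands below are invented:

    items = [('commit.b', 'cmd2'), ('precommit', 'cmd3'), ('commit', 'cmd1')]
    name = 'commit'
    hooks = sorted((h, c) for h, c in items
                   if h.split('.', 1)[0] == name and c)
    assert hooks == [('commit', 'cmd1'), ('commit.b', 'cmd2')]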
175 175 tag_disallowed = ':\r\n'
176 176
177 177 def tag(self, name, node, message, local, user, date):
178 178 '''tag a revision with a symbolic name.
179 179
180 180 if local is True, the tag is stored in a per-repository file.
181 181 otherwise, it is stored in the .hgtags file, and a new
182 182 changeset is committed with the change.
183 183
184 184 keyword arguments:
185 185
186 186 local: whether to store tag in non-version-controlled file
187 187 (default False)
188 188
189 189 message: commit message to use if committing
190 190
191 191 user: name of user to use if committing
192 192
193 193 date: date tuple to use if committing'''
194 194
195 195 for c in self.tag_disallowed:
196 196 if c in name:
197 197 raise util.Abort(_('%r cannot be used in a tag name') % c)
198 198
199 199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200 200
201 201 if local:
202 202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 203 self.hook('tag', node=hex(node), tag=name, local=local)
204 204 return
205 205
206 206 for x in self.status()[:5]:
207 207 if '.hgtags' in x:
208 208 raise util.Abort(_('working copy of .hgtags is changed '
209 209 '(please commit .hgtags manually)'))
210 210
211 211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 212 if self.dirstate.state('.hgtags') == '?':
213 213 self.add(['.hgtags'])
214 214
215 215 self.commit(['.hgtags'], message, user, date)
216 216 self.hook('tag', node=hex(node), tag=name, local=local)
217 217
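tag_disallowed bans ':' and line breaks in tag names, since '.hgtags' lines are '<node> <name>' records separated by newlines. A sketch of the validation (the real code raises util.Abort; ValueError stands in here):

    tag_disallowed = ':\r\n'

    def checktag(name):
        for c in tag_disallowed:
            if c in name:
                raise ValueError('%r cannot be used in a tag name' % c)

    checktag('v1.0')            # accepted
    try:
        checktag('bad:tag')
    except ValueError:
        pass                    # rejected, as expected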
218 218 def tags(self):
219 219 '''return a mapping of tag to node'''
220 220 if not self.tagscache:
221 221 self.tagscache = {}
222 222
223 223 def parsetag(line, context):
224 224 if not line:
225 225 return
226 226 s = line.split(" ", 1)
227 227 if len(s) != 2:
228 228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 229 return
230 230 node, key = s
231 231 key = key.strip()
232 232 try:
233 233 bin_n = bin(node)
234 234 except TypeError:
235 235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 236 (context, node))
237 237 return
238 238 if bin_n not in self.changelog.nodemap:
239 239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 240 (context, key))
241 241 return
242 242 self.tagscache[key] = bin_n
243 243
244 244 # read the tags file from each head, ending with the tip,
245 245 # and add each tag found to the map, with "newer" ones
246 246 # taking precedence
247 247 heads = self.heads()
248 248 heads.reverse()
249 249 seen = {}
250 250 for node in heads:
251 251 f = self.filectx('.hgtags', node)
252 252 if not f or f.filerev() in seen: continue
253 253 seen[f.filerev()] = 1
254 254 count = 0
255 255 for l in f.data().splitlines():
256 256 count += 1
257 257 parsetag(l, _("%s, line %d") % (str(f), count))
258 258
259 259 try:
260 260 f = self.opener("localtags")
261 261 count = 0
262 262 for l in f:
263 263 count += 1
264 264 parsetag(l, _("localtags, line %d") % count)
265 265 except IOError:
266 266 pass
267 267
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
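Each tags line parsed by parsetag() above is '<40-hex-node> <tagname>'; a sketch of the split (the node is shortened here purely for readability, real nodes are 40 hex digits):

    line = '26b556c1bead tip-of-feature'
    node, key = line.split(' ', 1)
    assert (node, key.strip()) == ('26b556c1bead', 'tip-of-feature')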
272 272 def tagslist(self):
273 273 '''return a list of tags ordered by revision'''
274 274 l = []
275 275 for t, n in self.tags().items():
276 276 try:
277 277 r = self.changelog.rev(n)
278 278 except:
279 279 r = -2 # sort to the beginning of the list if unknown
280 280 l.append((r, t, n))
281 281 l.sort()
282 282 return [(t, n) for r, t, n in l]
283 283
284 284 def nodetags(self, node):
285 285 '''return the tags associated with a node'''
286 286 if not self.nodetagscache:
287 287 self.nodetagscache = {}
288 288 for t, n in self.tags().items():
289 289 self.nodetagscache.setdefault(n, []).append(t)
290 290 return self.nodetagscache.get(node, [])
291 291
292 292 def branchtags(self):
293 293 if self.branchcache != None:
294 294 return self.branchcache
295 295
296 296 self.branchcache = {} # avoid recursion in changectx
297 297
298 298 partial, last, lrev = self._readbranchcache()
299 299
300 300 tiprev = self.changelog.count() - 1
301 301 if lrev != tiprev:
302 302 self._updatebranchcache(partial, lrev+1, tiprev+1)
303 303 self._writebranchcache(partial, self.changelog.tip(), tiprev)
304 304
305 305 self.branchcache = partial
306 306 return self.branchcache
307 307
308 308 def _readbranchcache(self):
309 309 partial = {}
310 310 try:
311 311 f = self.opener("branches.cache")
312 312 last, lrev = f.readline().rstrip().split(" ", 1)
313 313 last, lrev = bin(last), int(lrev)
314 314 if (lrev < self.changelog.count() and
315 315 self.changelog.node(lrev) == last): # sanity check
316 316 for l in f:
317 317 node, label = l.rstrip().split(" ", 1)
318 318 partial[label] = bin(node)
319 319 else: # invalidate the cache
320 320 last, lrev = nullid, -1
321 321 f.close()
322 322 except IOError:
323 323 last, lrev = nullid, -1
324 324 return partial, last, lrev
325 325
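The branches.cache layout read above and written just below: the first line is '<tip-hex> <tip-rev>' for the sanity check, each later line '<node-hex> <label>'. A sketch with made-up short hashes (the real code runs nodes through bin()/hex()):

    cache = ['00aa 42', '11bb default', '22cc stable']
    last, lrev = cache[0].split(' ', 1)
    partial = dict((label, node)
                   for node, label in (l.split(' ', 1) for l in cache[1:]))
    assert (lrev, partial['stable']) == ('42', '22cc')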
326 326 def _writebranchcache(self, branches, tip, tiprev):
327 327 try:
328 328 f = self.opener("branches.cache", "w")
329 329 f.write("%s %s\n" % (hex(tip), tiprev))
330 330 for label, node in branches.iteritems():
331 331 f.write("%s %s\n" % (hex(node), label))
332 332 except IOError:
333 333 pass
334 334
335 335 def _updatebranchcache(self, partial, start, end):
336 336 for r in xrange(start, end):
337 337 c = self.changectx(r)
338 338 b = c.branch()
339 339 if b:
340 340 partial[b] = c.node()
341 341
342 342 def lookup(self, key):
343 343 if key == '.':
344 344 key = self.dirstate.parents()[0]
345 345 if key == nullid:
346 346 raise repo.RepoError(_("no revision checked out"))
347 347 n = self.changelog._match(key)
348 348 if n:
349 349 return n
350 350 if key in self.tags():
351 351 return self.tags()[key]
352 352 if key in self.branchtags():
353 353 return self.branchtags()[key]
354 354 n = self.changelog._partialmatch(key)
355 355 if n:
356 356 return n
357 357 raise repo.RepoError(_("unknown revision '%s'") % key)
358 358
359 359 def dev(self):
360 360 return os.lstat(self.path).st_dev
361 361
362 362 def local(self):
363 363 return True
364 364
365 365 def join(self, f):
366 366 return os.path.join(self.path, f)
367 367
368 368 def sjoin(self, f):
369 369 return os.path.join(self.path, f)
370 370
371 371 def wjoin(self, f):
372 372 return os.path.join(self.root, f)
373 373
374 374 def file(self, f):
375 375 if f[0] == '/':
376 376 f = f[1:]
377 377 return filelog.filelog(self.sopener, f, self.revlogversion)
378 378
379 379 def changectx(self, changeid=None):
380 380 return context.changectx(self, changeid)
381 381
382 382 def workingctx(self):
383 383 return context.workingctx(self)
384 384
385 385 def parents(self, changeid=None):
386 386 '''
387 387 get list of changectxs for parents of changeid or working directory
388 388 '''
389 389 if changeid is None:
390 390 pl = self.dirstate.parents()
391 391 else:
392 392 n = self.changelog.lookup(changeid)
393 393 pl = self.changelog.parents(n)
394 394 if pl[1] == nullid:
395 395 return [self.changectx(pl[0])]
396 396 return [self.changectx(pl[0]), self.changectx(pl[1])]
397 397
398 398 def filectx(self, path, changeid=None, fileid=None):
399 399 """changeid can be a changeset revision, node, or tag.
400 400 fileid can be a file revision or node."""
401 401 return context.filectx(self, path, changeid, fileid)
402 402
403 403 def getcwd(self):
404 404 return self.dirstate.getcwd()
405 405
406 406 def wfile(self, f, mode='r'):
407 407 return self.wopener(f, mode)
408 408
409 409 def wread(self, filename):
410 410 if self.encodepats == None:
411 411 l = []
412 412 for pat, cmd in self.ui.configitems("encode"):
413 413 mf = util.matcher(self.root, "", [pat], [], [])[1]
414 414 l.append((mf, cmd))
415 415 self.encodepats = l
416 416
417 417 data = self.wopener(filename, 'r').read()
418 418
419 419 for mf, cmd in self.encodepats:
420 420 if mf(filename):
421 421 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
422 422 data = util.filter(data, cmd)
423 423 break
424 424
425 425 return data
426 426
427 427 def wwrite(self, filename, data, fd=None):
428 428 if self.decodepats == None:
429 429 l = []
430 430 for pat, cmd in self.ui.configitems("decode"):
431 431 mf = util.matcher(self.root, "", [pat], [], [])[1]
432 432 l.append((mf, cmd))
433 433 self.decodepats = l
434 434
435 435 for mf, cmd in self.decodepats:
436 436 if mf(filename):
437 437 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
438 438 data = util.filter(data, cmd)
439 439 break
440 440
441 441 if fd:
442 442 return fd.write(data)
443 443 return self.wopener(filename, 'w').write(data)
444 444
445 445 def transaction(self):
446 446 tr = self.transhandle
447 447 if tr != None and tr.running():
448 448 return tr.nest()
449 449
450 450 # save dirstate for rollback
451 451 try:
452 452 ds = self.opener("dirstate").read()
453 453 except IOError:
454 454 ds = ""
455 455 self.opener("journal.dirstate", "w").write(ds)
456 456
457 457 tr = transaction.transaction(self.ui.warn, self.sopener,
458 458 self.sjoin("journal"),
459 459 aftertrans(self.path))
460 460 self.transhandle = tr
461 461 return tr
462 462
463 463 def recover(self):
464 464 l = self.lock()
465 465 if os.path.exists(self.sjoin("journal")):
466 466 self.ui.status(_("rolling back interrupted transaction\n"))
467 467 transaction.rollback(self.sopener, self.sjoin("journal"))
468 468 self.reload()
469 469 return True
470 470 else:
471 471 self.ui.warn(_("no interrupted transaction available\n"))
472 472 return False
473 473
474 474 def rollback(self, wlock=None):
475 475 if not wlock:
476 476 wlock = self.wlock()
477 477 l = self.lock()
478 478 if os.path.exists(self.sjoin("undo")):
479 479 self.ui.status(_("rolling back last transaction\n"))
480 480 transaction.rollback(self.sopener, self.sjoin("undo"))
481 481 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
482 482 self.reload()
483 483 self.wreload()
484 484 else:
485 485 self.ui.warn(_("no rollback information available\n"))
486 486
487 487 def wreload(self):
488 488 self.dirstate.read()
489 489
490 490 def reload(self):
491 491 self.changelog.load()
492 492 self.manifest.load()
493 493 self.tagscache = None
494 494 self.nodetagscache = None
495 495
496 496 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
497 497 desc=None):
498 498 try:
499 499 l = lock.lock(lockname, 0, releasefn, desc=desc)
500 500 except lock.LockHeld, inst:
501 501 if not wait:
502 502 raise
503 503 self.ui.warn(_("waiting for lock on %s held by %s\n") %
504 504 (desc, inst.args[0]))
505 505 # default to 600 seconds timeout
506 506 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
507 507 releasefn, desc=desc)
508 508 if acquirefn:
509 509 acquirefn()
510 510 return l
511 511
512 512 def lock(self, wait=1):
513 513 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
514 514 desc=_('repository %s') % self.origroot)
515 515
516 516 def wlock(self, wait=1):
517 517 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
518 518 self.wreload,
519 519 desc=_('working directory of %s') % self.origroot)
520 520
521 521 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
522 522 """
523 523 commit an individual file as part of a larger transaction
524 524 """
525 525
526 526 t = self.wread(fn)
527 527 fl = self.file(fn)
528 528 fp1 = manifest1.get(fn, nullid)
529 529 fp2 = manifest2.get(fn, nullid)
530 530
531 531 meta = {}
532 532 cp = self.dirstate.copied(fn)
533 533 if cp:
534 534 meta["copy"] = cp
535 535 if not manifest2: # not a branch merge
536 536 meta["copyrev"] = hex(manifest1.get(cp, nullid))
537 537 fp2 = nullid
538 538 elif fp2 != nullid: # copied on remote side
539 539 meta["copyrev"] = hex(manifest1.get(cp, nullid))
540 540 else: # copied on local side, reversed
541 541 meta["copyrev"] = hex(manifest2.get(cp))
542 542 fp2 = nullid
543 543 self.ui.debug(_(" %s: copy %s:%s\n") %
544 544 (fn, cp, meta["copyrev"]))
545 545 fp1 = nullid
546 546 elif fp2 != nullid:
547 547 # is one parent an ancestor of the other?
548 548 fpa = fl.ancestor(fp1, fp2)
549 549 if fpa == fp1:
550 550 fp1, fp2 = fp2, nullid
551 551 elif fpa == fp2:
552 552 fp2 = nullid
553 553
554 554 # is the file unmodified from the parent? report existing entry
555 555 if fp2 == nullid and not fl.cmp(fp1, t):
556 556 return fp1
557 557
558 558 changelist.append(fn)
559 559 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
560 560
561 561 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
562 562 orig_parent = self.dirstate.parents()[0] or nullid
563 563 p1 = p1 or self.dirstate.parents()[0] or nullid
564 564 p2 = p2 or self.dirstate.parents()[1] or nullid
565 565 c1 = self.changelog.read(p1)
566 566 c2 = self.changelog.read(p2)
567 567 m1 = self.manifest.read(c1[0]).copy()
568 568 m2 = self.manifest.read(c2[0])
569 569 changed = []
570 570 removed = []
571 571
572 572 if orig_parent == p1:
573 573 update_dirstate = 1
574 574 else:
575 575 update_dirstate = 0
576 576
577 577 if not wlock:
578 578 wlock = self.wlock()
579 579 l = self.lock()
580 580 tr = self.transaction()
581 581 linkrev = self.changelog.count()
582 582 for f in files:
583 583 try:
584 584 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
585 585 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
586 586 except IOError:
587 587 try:
588 588 del m1[f]
589 589 if update_dirstate:
590 590 self.dirstate.forget([f])
591 591 removed.append(f)
592 592 except:
593 593 # deleted from p2?
594 594 pass
595 595
596 596 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
597 597 user = user or self.ui.username()
598 598 n = self.changelog.add(mnode, changed + removed, text,
599 599 tr, p1, p2, user, date)
600 600 tr.close()
601 601 if update_dirstate:
602 602 self.dirstate.setparents(n, nullid)
603 603
604 604 def commit(self, files=None, text="", user=None, date=None,
605 605 match=util.always, force=False, lock=None, wlock=None,
606 606 force_editor=False):
607 607 commit = []
608 608 remove = []
609 609 changed = []
610 610
611 611 if files:
612 612 for f in files:
613 613 s = self.dirstate.state(f)
614 614 if s in 'nmai':
615 615 commit.append(f)
616 616 elif s == 'r':
617 617 remove.append(f)
618 618 else:
619 619 self.ui.warn(_("%s not tracked!\n") % f)
620 620 else:
621 621 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
622 622 commit = modified + added
623 623 remove = removed
624 624
625 625 p1, p2 = self.dirstate.parents()
626 626 c1 = self.changelog.read(p1)
627 627 c2 = self.changelog.read(p2)
628 628 m1 = self.manifest.read(c1[0]).copy()
629 629 m2 = self.manifest.read(c2[0])
630 630
631 631 branchname = self.workingctx().branch()
632 632 oldname = c1[5].get("branch", "")
633 633
634 634 if not commit and not remove and not force and p2 == nullid and \
635 635 branchname == oldname:
636 636 self.ui.status(_("nothing changed\n"))
637 637 return None
638 638
639 639 xp1 = hex(p1)
640 640 if p2 == nullid: xp2 = ''
641 641 else: xp2 = hex(p2)
642 642
643 643 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
644 644
645 645 if not wlock:
646 646 wlock = self.wlock()
647 647 if not lock:
648 648 lock = self.lock()
649 649 tr = self.transaction()
650 650
651 651 # check in files
652 652 new = {}
653 653 linkrev = self.changelog.count()
654 654 commit.sort()
655 655 for f in commit:
656 656 self.ui.note(f + "\n")
657 657 try:
658 658 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
659 659 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
660 660 except IOError:
661 661 self.ui.warn(_("trouble committing %s!\n") % f)
662 662 raise
663 663
664 664 # update manifest
665 665 m1.update(new)
666 666 for f in remove:
667 667 if f in m1:
668 668 del m1[f]
669 669 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
670 670
671 671 # add changeset
672 672 new = new.keys()
673 673 new.sort()
674 674
675 675 user = user or self.ui.username()
676 676 if not text or force_editor:
677 677 edittext = []
678 678 if text:
679 679 edittext.append(text)
680 680 edittext.append("")
681 681 if p2 != nullid:
682 682 edittext.append("HG: branch merge")
683 683 edittext.extend(["HG: changed %s" % f for f in changed])
684 684 edittext.extend(["HG: removed %s" % f for f in remove])
685 685 if not changed and not remove:
686 686 edittext.append("HG: no files changed")
687 687 edittext.append("")
688 688 # run editor in the repository root
689 689 olddir = os.getcwd()
690 690 os.chdir(self.root)
691 691 text = self.ui.edit("\n".join(edittext), user)
692 692 os.chdir(olddir)
693 693
694 694 lines = [line.rstrip() for line in text.rstrip().splitlines()]
695 695 while lines and not lines[0]:
696 696 del lines[0]
697 697 if not lines:
698 698 return None
699 699 text = '\n'.join(lines)
700 700 extra = {}
701 701 if branchname:
702 702 extra["branch"] = branchname
703 703 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
704 704 user, date, extra)
705 705 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
706 706 parent2=xp2)
707 707 tr.close()
708 708
709 709 self.dirstate.setparents(n)
710 710 self.dirstate.update(new, "n")
711 711 self.dirstate.forget(remove)
712 712
713 713 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
714 714 return n
715 715
716 716 def walk(self, node=None, files=[], match=util.always, badmatch=None):
717 '''
718 walk recursively through the directory tree or a given
719 changeset, finding all files matched by the match
720 function
721
722 results are yielded in a tuple (src, filename), where src
723 is one of:
724 'f' the file was found in the directory tree
725 'm' the file was only in the dirstate and not in the tree
726 'b' file was not found and matched badmatch
727 '''
728
717 729 if node:
718 730 fdict = dict.fromkeys(files)
719 731 for fn in self.manifest.read(self.changelog.read(node)[0]):
720 732 for ffn in fdict:
721 733 # match if the file is the exact name or a directory
722 734 if ffn == fn or fn.startswith("%s/" % ffn):
723 735 del fdict[ffn]
724 736 break
725 737 if match(fn):
726 738 yield 'm', fn
727 739 for fn in fdict:
728 740 if badmatch and badmatch(fn):
729 741 if match(fn):
730 742 yield 'b', fn
731 743 else:
732 744 self.ui.warn(_('%s: No such file in rev %s\n') % (
733 745 util.pathto(self.getcwd(), fn), short(node)))
734 746 else:
735 747 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
736 748 yield src, fn
737 749
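In the changeset branch of the new walk() docstring's contract, a requested name matches a manifest entry either exactly or as a leading directory; a sketch of that test:

    def matches(requested, manifest_fn):
        return (requested == manifest_fn
                or manifest_fn.startswith(requested + '/'))

    assert matches('a/b', 'a/b')      # exact file
    assert matches('a', 'a/b')        # directory prefix
    assert not matches('a', 'ab')     # not fooled by 'ab'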
738 750 def status(self, node1=None, node2=None, files=[], match=util.always,
739 751 wlock=None, list_ignored=False, list_clean=False):
740 752 """return status of files between two nodes or node and working directory
741 753
742 754 If node1 is None, use the first dirstate parent instead.
743 755 If node2 is None, compare node1 with working directory.
744 756 """
745 757
746 758 def fcmp(fn, mf):
747 759 t1 = self.wread(fn)
748 760 return self.file(fn).cmp(mf.get(fn, nullid), t1)
749 761
750 762 def mfmatches(node):
751 763 change = self.changelog.read(node)
752 764 mf = self.manifest.read(change[0]).copy()
753 765 for fn in mf.keys():
754 766 if not match(fn):
755 767 del mf[fn]
756 768 return mf
757 769
758 770 modified, added, removed, deleted, unknown = [], [], [], [], []
759 771 ignored, clean = [], []
760 772
761 773 compareworking = False
762 774 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
763 775 compareworking = True
764 776
765 777 if not compareworking:
766 778 # read the manifest from node1 before the manifest from node2,
767 779 # so that we'll hit the manifest cache if we're going through
768 780 # all the revisions in parent->child order.
769 781 mf1 = mfmatches(node1)
770 782
771 783 # are we comparing the working directory?
772 784 if not node2:
773 785 if not wlock:
774 786 try:
775 787 wlock = self.wlock(wait=0)
776 788 except lock.LockException:
777 789 wlock = None
778 790 (lookup, modified, added, removed, deleted, unknown,
779 791 ignored, clean) = self.dirstate.status(files, match,
780 792 list_ignored, list_clean)
781 793
782 794 # are we comparing working dir against its parent?
783 795 if compareworking:
784 796 if lookup:
785 797 # do a full compare of any files that might have changed
786 798 mf2 = mfmatches(self.dirstate.parents()[0])
787 799 for f in lookup:
788 800 if fcmp(f, mf2):
789 801 modified.append(f)
790 802 else:
791 803 clean.append(f)
792 804 if wlock is not None:
793 805 self.dirstate.update([f], "n")
794 806 else:
795 807 # we are comparing working dir against non-parent
796 808 # generate a pseudo-manifest for the working dir
797 809 # XXX: create it in dirstate.py ?
798 810 mf2 = mfmatches(self.dirstate.parents()[0])
799 811 for f in lookup + modified + added:
800 812 mf2[f] = ""
801 813 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
802 814 for f in removed:
803 815 if f in mf2:
804 816 del mf2[f]
805 817 else:
806 818 # we are comparing two revisions
807 819 mf2 = mfmatches(node2)
808 820
809 821 if not compareworking:
810 822 # flush lists from dirstate before comparing manifests
811 823 modified, added, clean = [], [], []
812 824
813 825 # make sure to sort the files so we talk to the disk in a
814 826 # reasonable order
815 827 mf2keys = mf2.keys()
816 828 mf2keys.sort()
817 829 for fn in mf2keys:
818 830 if mf1.has_key(fn):
819 831 if mf1.flags(fn) != mf2.flags(fn) or \
820 832 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
821 833 modified.append(fn)
822 834 elif list_clean:
823 835 clean.append(fn)
824 836 del mf1[fn]
825 837 else:
826 838 added.append(fn)
827 839
828 840 removed = mf1.keys()
829 841
830 842 # sort and return results:
831 843 for l in modified, added, removed, deleted, unknown, ignored, clean:
832 844 l.sort()
833 845 return (modified, added, removed, deleted, unknown, ignored, clean)
834 846
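The two-revision arm above reduces to a manifest diff: keys only in mf2 are added, differing entries modified (the real code also compares flags and may fall back to fcmp), and whatever is left in mf1 afterwards is removed. A sketch with hypothetical hashes:

    mf1 = {'a': 'h1', 'b': 'h2', 'd': 'h5'}
    mf2 = {'a': 'h1', 'b': 'h3', 'c': 'h4'}
    modified = sorted(f for f in mf2 if f in mf1 and mf1[f] != mf2[f])
    added = sorted(f for f in mf2 if f not in mf1)
    removed = sorted(f for f in mf1 if f not in mf2)
    assert (modified, added, removed) == (['b'], ['c'], ['d'])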
835 847 def add(self, list, wlock=None):
836 848 if not wlock:
837 849 wlock = self.wlock()
838 850 for f in list:
839 851 p = self.wjoin(f)
840 852 if not os.path.exists(p):
841 853 self.ui.warn(_("%s does not exist!\n") % f)
842 854 elif not os.path.isfile(p):
843 855 self.ui.warn(_("%s not added: only files supported currently\n")
844 856 % f)
845 857 elif self.dirstate.state(f) in 'an':
846 858 self.ui.warn(_("%s already tracked!\n") % f)
847 859 else:
848 860 self.dirstate.update([f], "a")
849 861
850 862 def forget(self, list, wlock=None):
851 863 if not wlock:
852 864 wlock = self.wlock()
853 865 for f in list:
854 866 if self.dirstate.state(f) not in 'ai':
855 867 self.ui.warn(_("%s not added!\n") % f)
856 868 else:
857 869 self.dirstate.forget([f])
858 870
859 871 def remove(self, list, unlink=False, wlock=None):
860 872 if unlink:
861 873 for f in list:
862 874 try:
863 875 util.unlink(self.wjoin(f))
864 876 except OSError, inst:
865 877 if inst.errno != errno.ENOENT:
866 878 raise
867 879 if not wlock:
868 880 wlock = self.wlock()
869 881 for f in list:
870 882 p = self.wjoin(f)
871 883 if os.path.exists(p):
872 884 self.ui.warn(_("%s still exists!\n") % f)
873 885 elif self.dirstate.state(f) == 'a':
874 886 self.dirstate.forget([f])
875 887 elif f not in self.dirstate:
876 888 self.ui.warn(_("%s not tracked!\n") % f)
877 889 else:
878 890 self.dirstate.update([f], "r")
879 891
880 892 def undelete(self, list, wlock=None):
881 893 p = self.dirstate.parents()[0]
882 894 mn = self.changelog.read(p)[0]
883 895 m = self.manifest.read(mn)
884 896 if not wlock:
885 897 wlock = self.wlock()
886 898 for f in list:
887 899 if self.dirstate.state(f) not in "r":
888 900 self.ui.warn("%s not removed!\n" % f)
889 901 else:
890 902 t = self.file(f).read(m[f])
891 903 self.wwrite(f, t)
892 904 util.set_exec(self.wjoin(f), m.execf(f))
893 905 self.dirstate.update([f], "n")
894 906
895 907 def copy(self, source, dest, wlock=None):
896 908 p = self.wjoin(dest)
897 909 if not os.path.exists(p):
898 910 self.ui.warn(_("%s does not exist!\n") % dest)
899 911 elif not os.path.isfile(p):
900 912 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
901 913 else:
902 914 if not wlock:
903 915 wlock = self.wlock()
904 916 if self.dirstate.state(dest) == '?':
905 917 self.dirstate.update([dest], "a")
906 918 self.dirstate.copy(source, dest)
907 919
908 920 def heads(self, start=None):
909 921 heads = self.changelog.heads(start)
910 922 # sort the output in rev descending order
911 923 heads = [(-self.changelog.rev(h), h) for h in heads]
912 924 heads.sort()
913 925 return [n for (r, n) in heads]
914 926
915 927 # branchlookup returns a dict giving a list of branches for
916 928 # each head. A branch is defined as the tag of a node or
917 929 # the branch of the node's parents. If a node has multiple
918 930 # branch tags, tags are eliminated if they are visible from other
919 931 # branch tags.
920 932 #
921 933 # So, for this graph:  a->b->c->d->e
922 934 #                       \         /
923 935 #                        aa -----/
924 936 # a has tag 2.6.12
925 937 # d has tag 2.6.13
926 938 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
927 939 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
928 940 # from the list.
929 941 #
930 942 # It is possible that more than one head will have the same branch tag.
931 943 # callers need to check the result for multiple heads under the same
932 944 # branch tag if that is a problem for them (ie checkout of a specific
933 945 # branch).
934 946 #
935 947 # passing in a specific branch will limit the depth of the search
936 948 # through the parents. It won't limit the branches returned in the
937 949 # result though.
938 950 def branchlookup(self, heads=None, branch=None):
939 951 if not heads:
940 952 heads = self.heads()
941 953 headt = [ h for h in heads ]
942 954 chlog = self.changelog
943 955 branches = {}
944 956 merges = []
945 957 seenmerge = {}
946 958
947 959 # traverse the tree once for each head, recording in the branches
948 960 # dict which tags are visible from this head. The branches
949 961 # dict also records which tags are visible from each tag
950 962 # while we traverse.
951 963 while headt or merges:
952 964 if merges:
953 965 n, found = merges.pop()
954 966 visit = [n]
955 967 else:
956 968 h = headt.pop()
957 969 visit = [h]
958 970 found = [h]
959 971 seen = {}
960 972 while visit:
961 973 n = visit.pop()
962 974 if n in seen:
963 975 continue
964 976 pp = chlog.parents(n)
965 977 tags = self.nodetags(n)
966 978 if tags:
967 979 for x in tags:
968 980 if x == 'tip':
969 981 continue
970 982 for f in found:
971 983 branches.setdefault(f, {})[n] = 1
972 984 branches.setdefault(n, {})[n] = 1
973 985 break
974 986 if n not in found:
975 987 found.append(n)
976 988 if branch in tags:
977 989 continue
978 990 seen[n] = 1
979 991 if pp[1] != nullid and n not in seenmerge:
980 992 merges.append((pp[1], [x for x in found]))
981 993 seenmerge[n] = 1
982 994 if pp[0] != nullid:
983 995 visit.append(pp[0])
984 996 # traverse the branches dict, eliminating branch tags from each
985 997 # head that are visible from another branch tag for that head.
986 998 out = {}
987 999 viscache = {}
988 1000 for h in heads:
989 1001 def visible(node):
990 1002 if node in viscache:
991 1003 return viscache[node]
992 1004 ret = {}
993 1005 visit = [node]
994 1006 while visit:
995 1007 x = visit.pop()
996 1008 if x in viscache:
997 1009 ret.update(viscache[x])
998 1010 elif x not in ret:
999 1011 ret[x] = 1
1000 1012 if x in branches:
1001 1013 visit[len(visit):] = branches[x].keys()
1002 1014 viscache[node] = ret
1003 1015 return ret
1004 1016 if h not in branches:
1005 1017 continue
1006 1018 # O(n^2), but somewhat limited. This only searches the
1007 1019 # tags visible from a specific head, not all the tags in the
1008 1020 # whole repo.
1009 1021 for b in branches[h]:
1010 1022 vis = False
1011 1023 for bb in branches[h].keys():
1012 1024 if b != bb:
1013 1025 if b in visible(bb):
1014 1026 vis = True
1015 1027 break
1016 1028 if not vis:
1017 1029 l = out.setdefault(h, [])
1018 1030 l[len(l):] = self.nodetags(b)
1019 1031 return out
1020 1032
1021 1033 def branches(self, nodes):
1022 1034 if not nodes:
1023 1035 nodes = [self.changelog.tip()]
1024 1036 b = []
1025 1037 for n in nodes:
1026 1038 t = n
1027 1039 while 1:
1028 1040 p = self.changelog.parents(n)
1029 1041 if p[1] != nullid or p[0] == nullid:
1030 1042 b.append((t, n, p[0], p[1]))
1031 1043 break
1032 1044 n = p[0]
1033 1045 return b
1034 1046
1035 1047 def between(self, pairs):
1036 1048 r = []
1037 1049
1038 1050 for top, bottom in pairs:
1039 1051 n, l, i = top, [], 0
1040 1052 f = 1
1041 1053
1042 1054 while n != bottom:
1043 1055 p = self.changelog.parents(n)[0]
1044 1056 if i == f:
1045 1057 l.append(n)
1046 1058 f = f * 2
1047 1059 n = p
1048 1060 i += 1
1049 1061
1050 1062 r.append(l)
1051 1063
1052 1064 return r
1053 1065
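between() samples each top->bottom chain at exponentially growing distances (1, 2, 4, ...), which is what keeps the remote binary search in findincoming() cheap. A toy trace over a linear history where rev n's parent is n-1:

    def sample(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                l.append(n)
                f *= 2
            n -= 1      # parent in this toy linear history
            i += 1
        return l

    assert sample(10, 0) == [9, 8, 6, 2]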
1054 1066 def findincoming(self, remote, base=None, heads=None, force=False):
1055 1067 """Return list of roots of the subsets of missing nodes from remote
1056 1068
1057 1069 If base dict is specified, assume that these nodes and their parents
1058 1070 exist on the remote side and that no child of a node of base exists
1059 1071 in both remote and self.
1060 1072 Furthermore base will be updated to include the nodes that exist
1061 1073 in self and remote but none of whose children exist in both.
1062 1074 If a list of heads is specified, return only nodes which are heads
1063 1075 or ancestors of these heads.
1064 1076
1065 1077 All the ancestors of base are in self and in remote.
1066 1078 All the descendants of the list returned are missing in self.
1067 1079 (and so we know that the rest of the nodes are missing in remote, see
1068 1080 outgoing)
1069 1081 """
1070 1082 m = self.changelog.nodemap
1071 1083 search = []
1072 1084 fetch = {}
1073 1085 seen = {}
1074 1086 seenbranch = {}
1075 1087 if base == None:
1076 1088 base = {}
1077 1089
1078 1090 if not heads:
1079 1091 heads = remote.heads()
1080 1092
1081 1093 if self.changelog.tip() == nullid:
1082 1094 base[nullid] = 1
1083 1095 if heads != [nullid]:
1084 1096 return [nullid]
1085 1097 return []
1086 1098
1087 1099 # assume we're closer to the tip than the root
1088 1100 # and start by examining the heads
1089 1101 self.ui.status(_("searching for changes\n"))
1090 1102
1091 1103 unknown = []
1092 1104 for h in heads:
1093 1105 if h not in m:
1094 1106 unknown.append(h)
1095 1107 else:
1096 1108 base[h] = 1
1097 1109
1098 1110 if not unknown:
1099 1111 return []
1100 1112
1101 1113 req = dict.fromkeys(unknown)
1102 1114 reqcnt = 0
1103 1115
1104 1116 # search through remote branches
1105 1117 # a 'branch' here is a linear segment of history, with four parts:
1106 1118 # head, root, first parent, second parent
1107 1119 # (a branch always has two parents (or none) by definition)
1108 1120 unknown = remote.branches(unknown)
1109 1121 while unknown:
1110 1122 r = []
1111 1123 while unknown:
1112 1124 n = unknown.pop(0)
1113 1125 if n[0] in seen:
1114 1126 continue
1115 1127
1116 1128 self.ui.debug(_("examining %s:%s\n")
1117 1129 % (short(n[0]), short(n[1])))
1118 1130 if n[0] == nullid: # found the end of the branch
1119 1131 pass
1120 1132 elif n in seenbranch:
1121 1133 self.ui.debug(_("branch already found\n"))
1122 1134 continue
1123 1135 elif n[1] and n[1] in m: # do we know the base?
1124 1136 self.ui.debug(_("found incomplete branch %s:%s\n")
1125 1137 % (short(n[0]), short(n[1])))
1126 1138 search.append(n) # schedule branch range for scanning
1127 1139 seenbranch[n] = 1
1128 1140 else:
1129 1141 if n[1] not in seen and n[1] not in fetch:
1130 1142 if n[2] in m and n[3] in m:
1131 1143 self.ui.debug(_("found new changeset %s\n") %
1132 1144 short(n[1]))
1133 1145 fetch[n[1]] = 1 # earliest unknown
1134 1146 for p in n[2:4]:
1135 1147 if p in m:
1136 1148 base[p] = 1 # latest known
1137 1149
1138 1150 for p in n[2:4]:
1139 1151 if p not in req and p not in m:
1140 1152 r.append(p)
1141 1153 req[p] = 1
1142 1154 seen[n[0]] = 1
1143 1155
1144 1156 if r:
1145 1157 reqcnt += 1
1146 1158 self.ui.debug(_("request %d: %s\n") %
1147 1159 (reqcnt, " ".join(map(short, r))))
1148 1160 for p in xrange(0, len(r), 10):
1149 1161 for b in remote.branches(r[p:p+10]):
1150 1162 self.ui.debug(_("received %s:%s\n") %
1151 1163 (short(b[0]), short(b[1])))
1152 1164 unknown.append(b)
1153 1165
1154 1166 # do binary search on the branches we found
1155 1167 while search:
1156 1168 n = search.pop(0)
1157 1169 reqcnt += 1
1158 1170 l = remote.between([(n[0], n[1])])[0]
1159 1171 l.append(n[1])
1160 1172 p = n[0]
1161 1173 f = 1
1162 1174 for i in l:
1163 1175 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1164 1176 if i in m:
1165 1177 if f <= 2:
1166 1178 self.ui.debug(_("found new branch changeset %s\n") %
1167 1179 short(p))
1168 1180 fetch[p] = 1
1169 1181 base[i] = 1
1170 1182 else:
1171 1183 self.ui.debug(_("narrowed branch search to %s:%s\n")
1172 1184 % (short(p), short(i)))
1173 1185 search.append((p, i))
1174 1186 break
1175 1187 p, f = i, f * 2
1176 1188
1177 1189 # sanity check our fetch list
1178 1190 for f in fetch.keys():
1179 1191 if f in m:
1180 1192 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1181 1193
1182 1194 if base.keys() == [nullid]:
1183 1195 if force:
1184 1196 self.ui.warn(_("warning: repository is unrelated\n"))
1185 1197 else:
1186 1198 raise util.Abort(_("repository is unrelated"))
1187 1199
1188 1200 self.ui.debug(_("found new changesets starting at ") +
1189 1201 " ".join([short(f) for f in fetch]) + "\n")
1190 1202
1191 1203 self.ui.debug(_("%d total queries\n") % reqcnt)
1192 1204
1193 1205 return fetch.keys()
1194 1206
1195 1207 def findoutgoing(self, remote, base=None, heads=None, force=False):
1196 1208 """Return list of nodes that are roots of subsets not in remote
1197 1209
1198 1210 If base dict is specified, assume that these nodes and their parents
1199 1211 exist on the remote side.
1200 1212 If a list of heads is specified, return only nodes which are heads
1201 1213 or ancestors of these heads, and return a second element which
1202 1214 contains all remote heads which get new children.
1203 1215 """
1204 1216 if base == None:
1205 1217 base = {}
1206 1218 self.findincoming(remote, base, heads, force=force)
1207 1219
1208 1220 self.ui.debug(_("common changesets up to ")
1209 1221 + " ".join(map(short, base.keys())) + "\n")
1210 1222
1211 1223 remain = dict.fromkeys(self.changelog.nodemap)
1212 1224
1213 1225 # prune everything remote has from the tree
1214 1226 del remain[nullid]
1215 1227 remove = base.keys()
1216 1228 while remove:
1217 1229 n = remove.pop(0)
1218 1230 if n in remain:
1219 1231 del remain[n]
1220 1232 for p in self.changelog.parents(n):
1221 1233 remove.append(p)
1222 1234
1223 1235 # find every node whose parents have been pruned
1224 1236 subset = []
1225 1237 # find every remote head that will get new children
1226 1238 updated_heads = {}
1227 1239 for n in remain:
1228 1240 p1, p2 = self.changelog.parents(n)
1229 1241 if p1 not in remain and p2 not in remain:
1230 1242 subset.append(n)
1231 1243 if heads:
1232 1244 if p1 in heads:
1233 1245 updated_heads[p1] = True
1234 1246 if p2 in heads:
1235 1247 updated_heads[p2] = True
1236 1248
1237 1249 # this is the set of all roots we have to push
1238 1250 if heads:
1239 1251 return subset, updated_heads.keys()
1240 1252 else:
1241 1253 return subset
1242 1254
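# Usage sketch for findoutgoing (hedged; `other` is illustrative): with
# no extra arguments it computes the common base itself via findincoming
# above; passing heads, as prepush below does with the remote heads,
# additionally returns the remote heads that would gain new children:
#
#   subset = self.findoutgoing(other)
#   subset, updated = self.findoutgoing(other, base, remote_heads)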
1243 1255 def pull(self, remote, heads=None, force=False, lock=None):
1244 1256 mylock = False
1245 1257 if not lock:
1246 1258 lock = self.lock()
1247 1259 mylock = True
1248 1260
1249 1261 try:
1250 1262 fetch = self.findincoming(remote, force=force)
1251 1263 if fetch == [nullid]:
1252 1264 self.ui.status(_("requesting all changes\n"))
1253 1265
1254 1266 if not fetch:
1255 1267 self.ui.status(_("no changes found\n"))
1256 1268 return 0
1257 1269
1258 1270 if heads is None:
1259 1271 cg = remote.changegroup(fetch, 'pull')
1260 1272 else:
1261 1273 if 'changegroupsubset' not in remote.capabilities:
1262 1274 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1263 1275 cg = remote.changegroupsubset(fetch, heads, 'pull')
1264 1276 return self.addchangegroup(cg, 'pull', remote.url())
1265 1277 finally:
1266 1278 if mylock:
1267 1279 lock.release()
1268 1280
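# Pull usage sketch (hedged; `other` is an illustrative peer):
#
#   modheads = repo.pull(other)             # everything new
#   modheads = repo.pull(other, heads=[h])  # only ancestors of h; needs
#                                           # the remote
#                                           # 'changegroupsubset'
#                                           # capability
#
# The return value is addchangegroup's: the number of heads modified or
# added plus one, or 0 when there was nothing to pull.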
1269 1281 def push(self, remote, force=False, revs=None):
1270 1282 # there are two ways to push to remote repo:
1271 1283 #
1272 1284 # addchangegroup assumes local user can lock remote
1273 1285 # repo (local filesystem, old ssh servers).
1274 1286 #
1275 1287 # unbundle assumes local user cannot lock remote repo (new ssh
1276 1288 # servers, http servers).
1277 1289
1278 1290 if remote.capable('unbundle'):
1279 1291 return self.push_unbundle(remote, force, revs)
1280 1292 return self.push_addchangegroup(remote, force, revs)
1281 1293
1282 1294 def prepush(self, remote, force, revs):
1283 1295 base = {}
1284 1296 remote_heads = remote.heads()
1285 1297 inc = self.findincoming(remote, base, remote_heads, force=force)
1286 1298 if not force and inc:
1287 1299 self.ui.warn(_("abort: unsynced remote changes!\n"))
1288 1300 self.ui.status(_("(did you forget to sync?"
1289 1301 " use push -f to force)\n"))
1290 1302 return None, 1
1291 1303
1292 1304 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1293 1305 if revs is not None:
1294 1306 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1295 1307 else:
1296 1308 bases, heads = update, self.changelog.heads()
1297 1309
1298 1310 if not bases:
1299 1311 self.ui.status(_("no changes found\n"))
1300 1312 return None, 1
1301 1313 elif not force:
1302 1314 # FIXME: we don't properly detect creation of new heads
1303 1315 # in the push -r case; assume the user knows what he's doing
1304 1316 if not revs and len(remote_heads) < len(heads) \
1305 1317 and remote_heads != [nullid]:
1306 1318 self.ui.warn(_("abort: push creates new remote branches!\n"))
1307 1319 self.ui.status(_("(did you forget to merge?"
1308 1320 " use push -f to force)\n"))
1309 1321 return None, 1
1310 1322
1311 1323 if revs is None:
1312 1324 cg = self.changegroup(update, 'push')
1313 1325 else:
1314 1326 cg = self.changegroupsubset(update, revs, 'push')
1315 1327 return cg, remote_heads
1316 1328
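# prepush's contract, as used by both push paths below: it returns
# (changegroup, remote_heads) when there is something safe to push, and
# (None, 1) otherwise; callers forward the second element as the exit
# status in the failure case.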
1317 1329 def push_addchangegroup(self, remote, force, revs):
1318 1330 lock = remote.lock()
1319 1331
1320 1332 ret = self.prepush(remote, force, revs)
1321 1333 if ret[0] is not None:
1322 1334 cg, remote_heads = ret
1323 1335 return remote.addchangegroup(cg, 'push', self.url())
1324 1336 return ret[1]
1325 1337
1326 1338 def push_unbundle(self, remote, force, revs):
1327 1339 # local repo finds heads on server, finds out what revs it
1328 1340 # must push. once revs transferred, if server finds it has
1329 1341 # different heads (someone else won commit/push race), server
1330 1342 # aborts.
1331 1343
1332 1344 ret = self.prepush(remote, force, revs)
1333 1345 if ret[0] is not None:
1334 1346 cg, remote_heads = ret
1335 1347 if force: remote_heads = ['force']
1336 1348 return remote.unbundle(cg, remote_heads, 'push')
1337 1349 return ret[1]
1338 1350
1339 1351 def changegroupinfo(self, nodes):
1340 1352 self.ui.note(_("%d changesets found\n") % len(nodes))
1341 1353 if self.ui.debugflag:
1342 1354 self.ui.debug(_("List of changesets:\n"))
1343 1355 for node in nodes:
1344 1356 self.ui.debug("%s\n" % hex(node))
1345 1357
1346 1358 def changegroupsubset(self, bases, heads, source):
1347 1359 """This function generates a changegroup consisting of all the nodes
1348 1360 that are descendants of any of the bases, and ancestors of any of
1349 1361 the heads.
1350 1362
1351 1363 It is fairly complex as determining which filenodes and which
1352 1364 manifest nodes need to be included for the changeset to be complete
1353 1365 is non-trivial.
1354 1366
1355 1367 Another wrinkle is doing the reverse, figuring out which changeset in
1356 1368 the changegroup a particular filenode or manifestnode belongs to."""
1357 1369
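# Usage sketch (hedged): both ends of the wire use this entry point,
# e.g. self.changegroupsubset(update, revs, 'push') in prepush above;
# the return value is a util.chunkbuffer that streams the group.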
1358 1370 self.hook('preoutgoing', throw=True, source=source)
1359 1371
1360 1372 # Set up some initial variables
1361 1373 # Make it easy to refer to self.changelog
1362 1374 cl = self.changelog
1363 1375 # msng is short for missing - compute the list of changesets in this
1364 1376 # changegroup.
1365 1377 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1366 1378 self.changegroupinfo(msng_cl_lst)
1367 1379 # Some bases may turn out to be superfluous, and some heads may be
1368 1380 # too. nodesbetween will return the minimal set of bases and heads
1369 1381 # necessary to re-create the changegroup.
1370 1382
1371 1383 # Known heads are the list of heads that it is assumed the recipient
1372 1384 # of this changegroup will know about.
1373 1385 knownheads = {}
1374 1386 # We assume that all parents of bases are known heads.
1375 1387 for n in bases:
1376 1388 for p in cl.parents(n):
1377 1389 if p != nullid:
1378 1390 knownheads[p] = 1
1379 1391 knownheads = knownheads.keys()
1380 1392 if knownheads:
1381 1393 # Now that we know what heads are known, we can compute which
1382 1394 # changesets are known. The recipient must know about all
1383 1395 # changesets required to reach the known heads from the null
1384 1396 # changeset.
1385 1397 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1386 1398 junk = None
1387 1399 # Transform the list into an ersatz set.
1388 1400 has_cl_set = dict.fromkeys(has_cl_set)
1389 1401 else:
1390 1402 # If there were no known heads, the recipient cannot be assumed to
1391 1403 # know about any changesets.
1392 1404 has_cl_set = {}
1393 1405
1394 1406 # Make it easy to refer to self.manifest
1395 1407 mnfst = self.manifest
1396 1408 # We don't know which manifests are missing yet
1397 1409 msng_mnfst_set = {}
1398 1410 # Nor do we know which filenodes are missing.
1399 1411 msng_filenode_set = {}
1400 1412
1401 1413 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1402 1414 junk = None
1403 1415
1404 1416 # A changeset always belongs to itself, so the changenode lookup
1405 1417 # function for a changenode is identity.
1406 1418 def identity(x):
1407 1419 return x
1408 1420
1409 1421 # A function generating function. Sets up an environment for the
1410 1422 # inner function.
1411 1423 def cmp_by_rev_func(revlog):
1412 1424 # Compare two nodes by their revision number in the environment's
1413 1425 # revision history. Since the revision number both represents the
1414 1426 # most efficient order to read the nodes in, and represents a
1415 1427 # topological sorting of the nodes, this function is often useful.
1416 1428 def cmp_by_rev(a, b):
1417 1429 return cmp(revlog.rev(a), revlog.rev(b))
1418 1430 return cmp_by_rev
1419 1431
1420 1432 # If we determine that a particular file or manifest node must be a
1421 1433 # node that the recipient of the changegroup will already have, we can
1422 1434 # also assume the recipient will have all the parents. This function
1423 1435 # prunes them from the set of missing nodes.
1424 1436 def prune_parents(revlog, hasset, msngset):
1425 1437 haslst = hasset.keys()
1426 1438 haslst.sort(cmp_by_rev_func(revlog))
1427 1439 for node in haslst:
1428 1440 parentlst = [p for p in revlog.parents(node) if p != nullid]
1429 1441 while parentlst:
1430 1442 n = parentlst.pop()
1431 1443 if n not in hasset:
1432 1444 hasset[n] = 1
1433 1445 p = [p for p in revlog.parents(n) if p != nullid]
1434 1446 parentlst.extend(p)
1435 1447 for n in hasset:
1436 1448 msngset.pop(n, None)
1437 1449
1438 1450 # This is a function generating function used to set up an environment
1439 1451 # for the inner function to execute in.
1440 1452 def manifest_and_file_collector(changedfileset):
1441 1453 # This is an information gathering function that gathers
1442 1454 # information from each changeset node that goes out as part of
1443 1455 # the changegroup. The information gathered is a list of which
1444 1456 # manifest nodes are potentially required (the recipient may
1445 1457 # already have them) and total list of all files which were
1446 1458 # changed in any changeset in the changegroup.
1447 1459 #
1448 1460 # We also remember the first changenode we saw any manifest
1449 1461 # referenced by so we can later determine which changenode 'owns'
1450 1462 # the manifest.
1451 1463 def collect_manifests_and_files(clnode):
1452 1464 c = cl.read(clnode)
1453 1465 for f in c[3]:
1454 1466 # Make sure we store only one instance of each filename
1455 1467 # string, no matter how many changesets touch the file.
1456 1468 changedfileset.setdefault(f, f)
1457 1469 msng_mnfst_set.setdefault(c[0], clnode)
1458 1470 return collect_manifests_and_files
1459 1471
1460 1472 # Figure out which manifest nodes (of the ones we think might be part
1461 1473 # of the changegroup) the recipient must know about and remove them
1462 1474 # from the changegroup.
1463 1475 def prune_manifests():
1464 1476 has_mnfst_set = {}
1465 1477 for n in msng_mnfst_set:
1466 1478 # If a 'missing' manifest thinks it belongs to a changenode
1467 1479 # the recipient is assumed to have, obviously the recipient
1468 1480 # must have that manifest.
1469 1481 linknode = cl.node(mnfst.linkrev(n))
1470 1482 if linknode in has_cl_set:
1471 1483 has_mnfst_set[n] = 1
1472 1484 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1473 1485
1474 1486 # Use the information collected in collect_manifests_and_files to say
1475 1487 # which changenode any manifestnode belongs to.
1476 1488 def lookup_manifest_link(mnfstnode):
1477 1489 return msng_mnfst_set[mnfstnode]
1478 1490
1479 1491 # A function generating function that sets up the initial environment
1480 1492 # for the inner function.
1481 1493 def filenode_collector(changedfiles):
1482 1494 next_rev = [0]
1483 1495 # This gathers information from each manifestnode included in the
1484 1496 # changegroup about which filenodes the manifest node references
1485 1497 # so we can include those in the changegroup too.
1486 1498 #
1487 1499 # It also remembers which changenode each filenode belongs to. It
1488 1500 # does this by assuming that a filenode belongs to the same
1489 1501 # changenode as the first manifest that references it.
1490 1502 def collect_msng_filenodes(mnfstnode):
1491 1503 r = mnfst.rev(mnfstnode)
1492 1504 if r == next_rev[0]:
1493 1505 # If this rev immediately follows the last one we looked at,
1494 1506 # a delta is all we need to see.
1495 1507 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1496 1508 # For each line in the delta
1497 1509 for dline in delta.splitlines():
1498 1510 # get the filename and filenode for that line
1499 1511 f, fnode = dline.split('\0')
1500 1512 fnode = bin(fnode[:40])
1501 1513 f = changedfiles.get(f, None)
1502 1514 # And if the file is in the list of files we care
1503 1515 # about.
1504 1516 if f is not None:
1505 1517 # Get the changenode this manifest belongs to
1506 1518 clnode = msng_mnfst_set[mnfstnode]
1507 1519 # Create the set of filenodes for the file if
1508 1520 # there isn't one already.
1509 1521 ndset = msng_filenode_set.setdefault(f, {})
1510 1522 # And set the filenode's changelog node to the
1511 1523 # manifest's if it hasn't been set already.
1512 1524 ndset.setdefault(fnode, clnode)
1513 1525 else:
1514 1526 # Otherwise we need a full manifest.
1515 1527 m = mnfst.read(mnfstnode)
1516 1528 # For every file we care about.
1517 1529 for f in changedfiles:
1518 1530 fnode = m.get(f, None)
1519 1531 # If it's in the manifest
1520 1532 if fnode is not None:
1521 1533 # See comments above.
1522 1534 clnode = msng_mnfst_set[mnfstnode]
1523 1535 ndset = msng_filenode_set.setdefault(f, {})
1524 1536 ndset.setdefault(fnode, clnode)
1525 1537 # Remember the revision we hope to see next.
1526 1538 next_rev[0] = r + 1
1527 1539 return collect_msng_filenodes
1528 1540
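# Note on the delta parsing above: each manifest line has the form
# "<filename>\0<40 hex digit filenode>", possibly followed by flag
# characters, which is why collect_msng_filenodes splits on '\0' and
# keeps only the first 40 characters of the node field.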
1529 1541 # We have a list of filenodes we think we need for a file; let's remove
1530 1542 # all those we know the recipient must have.
1531 1543 def prune_filenodes(f, filerevlog):
1532 1544 msngset = msng_filenode_set[f]
1533 1545 hasset = {}
1534 1546 # If a 'missing' filenode thinks it belongs to a changenode we
1535 1547 # assume the recipient must have, then the recipient must have
1536 1548 # that filenode.
1537 1549 for n in msngset:
1538 1550 clnode = cl.node(filerevlog.linkrev(n))
1539 1551 if clnode in has_cl_set:
1540 1552 hasset[n] = 1
1541 1553 prune_parents(filerevlog, hasset, msngset)
1542 1554
1543 1555 # A function generating function that sets up a context for the
1544 1556 # inner function.
1545 1557 def lookup_filenode_link_func(fname):
1546 1558 msngset = msng_filenode_set[fname]
1547 1559 # Lookup the changenode the filenode belongs to.
1548 1560 def lookup_filenode_link(fnode):
1549 1561 return msngset[fnode]
1550 1562 return lookup_filenode_link
1551 1563
1552 1564 # Now that we have all these utility functions to help out and
1553 1565 # logically divide up the task, generate the group.
1554 1566 def gengroup():
1555 1567 # The set of changed files starts empty.
1556 1568 changedfiles = {}
1557 1569 # Create a changenode group generator that will call our functions
1558 1570 # back to lookup the owning changenode and collect information.
1559 1571 group = cl.group(msng_cl_lst, identity,
1560 1572 manifest_and_file_collector(changedfiles))
1561 1573 for chnk in group:
1562 1574 yield chnk
1563 1575
1564 1576 # The list of manifests has been collected by the generator
1565 1577 # calling our functions back.
1566 1578 prune_manifests()
1567 1579 msng_mnfst_lst = msng_mnfst_set.keys()
1568 1580 # Sort the manifestnodes by revision number.
1569 1581 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1570 1582 # Create a generator for the manifestnodes that calls our lookup
1571 1583 # and data collection functions back.
1572 1584 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1573 1585 filenode_collector(changedfiles))
1574 1586 for chnk in group:
1575 1587 yield chnk
1576 1588
1577 1589 # These are no longer needed; dereference them and let the
1578 1590 # memory be reclaimed.
1579 1591 msng_mnfst_lst = None
1580 1592 msng_mnfst_set.clear()
1581 1593
1582 1594 changedfiles = changedfiles.keys()
1583 1595 changedfiles.sort()
1584 1596 # Go through all our files, sorted by name.
1585 1597 for fname in changedfiles:
1586 1598 filerevlog = self.file(fname)
1587 1599 # Toss out the filenodes that the recipient isn't really
1588 1600 # missing.
1589 1601 if msng_filenode_set.has_key(fname):
1590 1602 prune_filenodes(fname, filerevlog)
1591 1603 msng_filenode_lst = msng_filenode_set[fname].keys()
1592 1604 else:
1593 1605 msng_filenode_lst = []
1594 1606 # If any filenodes are left, generate the group for them,
1595 1607 # otherwise don't bother.
1596 1608 if len(msng_filenode_lst) > 0:
1597 1609 yield changegroup.genchunk(fname)
1598 1610 # Sort the filenodes by their revision #
1599 1611 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1600 1612 # Create a group generator and only pass in a changenode
1601 1613 # lookup function, as we don't need to collect any information
1602 1614 # from filenodes.
1603 1615 group = filerevlog.group(msng_filenode_lst,
1604 1616 lookup_filenode_link_func(fname))
1605 1617 for chnk in group:
1606 1618 yield chnk
1607 1619 if msng_filenode_set.has_key(fname):
1608 1620 # Don't need this anymore, toss it to free memory.
1609 1621 del msng_filenode_set[fname]
1610 1622 # Signal that no more groups are left.
1611 1623 yield changegroup.closechunk()
1612 1624
1613 1625 if msng_cl_lst:
1614 1626 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1615 1627
1616 1628 return util.chunkbuffer(gengroup())
1617 1629
1618 1630 def changegroup(self, basenodes, source):
1619 1631 """Generate a changegroup of all nodes that we have that a recipient
1620 1632 doesn't.
1621 1633
1622 1634 This is much easier than the previous function as we can assume that
1623 1635 the recipient has any changenode we aren't sending them."""
1624 1636
1625 1637 self.hook('preoutgoing', throw=True, source=source)
1626 1638
1627 1639 cl = self.changelog
1628 1640 nodes = cl.nodesbetween(basenodes, None)[0]
1629 1641 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1630 1642 self.changegroupinfo(nodes)
1631 1643
1632 1644 def identity(x):
1633 1645 return x
1634 1646
1635 1647 def gennodelst(revlog):
1636 1648 for r in xrange(0, revlog.count()):
1637 1649 n = revlog.node(r)
1638 1650 if revlog.linkrev(n) in revset:
1639 1651 yield n
1640 1652
1641 1653 def changed_file_collector(changedfileset):
1642 1654 def collect_changed_files(clnode):
1643 1655 c = cl.read(clnode)
1644 1656 for fname in c[3]:
1645 1657 changedfileset[fname] = 1
1646 1658 return collect_changed_files
1647 1659
1648 1660 def lookuprevlink_func(revlog):
1649 1661 def lookuprevlink(n):
1650 1662 return cl.node(revlog.linkrev(n))
1651 1663 return lookuprevlink
1652 1664
1653 1665 def gengroup():
1654 1666 # construct a list of all changed files
1655 1667 changedfiles = {}
1656 1668
1657 1669 for chnk in cl.group(nodes, identity,
1658 1670 changed_file_collector(changedfiles)):
1659 1671 yield chnk
1660 1672 changedfiles = changedfiles.keys()
1661 1673 changedfiles.sort()
1662 1674
1663 1675 mnfst = self.manifest
1664 1676 nodeiter = gennodelst(mnfst)
1665 1677 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1666 1678 yield chnk
1667 1679
1668 1680 for fname in changedfiles:
1669 1681 filerevlog = self.file(fname)
1670 1682 nodeiter = gennodelst(filerevlog)
1671 1683 nodeiter = list(nodeiter)
1672 1684 if nodeiter:
1673 1685 yield changegroup.genchunk(fname)
1674 1686 lookup = lookuprevlink_func(filerevlog)
1675 1687 for chnk in filerevlog.group(nodeiter, lookup):
1676 1688 yield chnk
1677 1689
1678 1690 yield changegroup.closechunk()
1679 1691
1680 1692 if nodes:
1681 1693 self.hook('outgoing', node=hex(nodes[0]), source=source)
1682 1694
1683 1695 return util.chunkbuffer(gengroup())
1684 1696
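# Compared with changegroupsubset, changegroup can take the simple route
# sketched above: every revision whose linkrev falls in revset is
# included wholesale, with no per-file pruning. A hedged usage sketch
# (`other` is illustrative):
#
#   cg = repo.changegroup(repo.findoutgoing(other), 'push')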
1685 1697 def addchangegroup(self, source, srctype, url):
1686 1698 """add changegroup to repo.
1687 1699 returns the number of heads modified or added, plus one."""
1688 1700
1689 1701 def csmap(x):
1690 1702 self.ui.debug(_("add changeset %s\n") % short(x))
1691 1703 return cl.count()
1692 1704
1693 1705 def revmap(x):
1694 1706 return cl.rev(x)
1695 1707
1696 1708 if not source:
1697 1709 return 0
1698 1710
1699 1711 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1700 1712
1701 1713 changesets = files = revisions = 0
1702 1714
1703 1715 tr = self.transaction()
1704 1716
1705 1717 # write changelog data to temp files so concurrent readers will not see
1706 1718 # inconsistent view
1707 1719 cl = None
1708 1720 try:
1709 1721 cl = appendfile.appendchangelog(self.sopener,
1710 1722 self.changelog.version)
1711 1723
1712 1724 oldheads = len(cl.heads())
1713 1725
1714 1726 # pull off the changeset group
1715 1727 self.ui.status(_("adding changesets\n"))
1716 1728 cor = cl.count() - 1
1717 1729 chunkiter = changegroup.chunkiter(source)
1718 1730 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1719 1731 raise util.Abort(_("received changelog group is empty"))
1720 1732 cnr = cl.count() - 1
1721 1733 changesets = cnr - cor
1722 1734
1723 1735 # pull off the manifest group
1724 1736 self.ui.status(_("adding manifests\n"))
1725 1737 chunkiter = changegroup.chunkiter(source)
1726 1738 # no need to check for empty manifest group here:
1727 1739 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1728 1740 # no new manifest will be created and the manifest group will
1729 1741 # be empty during the pull
1730 1742 self.manifest.addgroup(chunkiter, revmap, tr)
1731 1743
1732 1744 # process the files
1733 1745 self.ui.status(_("adding file changes\n"))
1734 1746 while 1:
1735 1747 f = changegroup.getchunk(source)
1736 1748 if not f:
1737 1749 break
1738 1750 self.ui.debug(_("adding %s revisions\n") % f)
1739 1751 fl = self.file(f)
1740 1752 o = fl.count()
1741 1753 chunkiter = changegroup.chunkiter(source)
1742 1754 if fl.addgroup(chunkiter, revmap, tr) is None:
1743 1755 raise util.Abort(_("received file revlog group is empty"))
1744 1756 revisions += fl.count() - o
1745 1757 files += 1
1746 1758
1747 1759 cl.writedata()
1748 1760 finally:
1749 1761 if cl:
1750 1762 cl.cleanup()
1751 1763
1752 1764 # make changelog see real files again
1753 1765 self.changelog = changelog.changelog(self.sopener,
1754 1766 self.changelog.version)
1755 1767 self.changelog.checkinlinesize(tr)
1756 1768
1757 1769 newheads = len(self.changelog.heads())
1758 1770 heads = ""
1759 1771 if oldheads and newheads != oldheads:
1760 1772 heads = _(" (%+d heads)") % (newheads - oldheads)
1761 1773
1762 1774 self.ui.status(_("added %d changesets"
1763 1775 " with %d changes to %d files%s\n")
1764 1776 % (changesets, revisions, files, heads))
1765 1777
1766 1778 if changesets > 0:
1767 1779 self.hook('pretxnchangegroup', throw=True,
1768 1780 node=hex(self.changelog.node(cor+1)), source=srctype,
1769 1781 url=url)
1770 1782
1771 1783 tr.close()
1772 1784
1773 1785 if changesets > 0:
1774 1786 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1775 1787 source=srctype, url=url)
1776 1788
1777 1789 for i in xrange(cor + 1, cnr + 1):
1778 1790 self.hook("incoming", node=hex(self.changelog.node(i)),
1779 1791 source=srctype, url=url)
1780 1792
1781 1793 return newheads - oldheads + 1
1782 1794
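# Hook sequence in addchangegroup, as implemented above: prechangegroup
# (may abort before any data is read), then pretxnchangegroup (inside
# the transaction, may abort and roll back), then changegroup and one
# 'incoming' per added changeset once the transaction has closed.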
1783 1795
1784 1796 def stream_in(self, remote):
1785 1797 fp = remote.stream_out()
1786 1798 resp = int(fp.readline())
1787 1799 if resp != 0:
1788 1800 raise util.Abort(_('operation forbidden by server'))
1789 1801 self.ui.status(_('streaming all changes\n'))
1790 1802 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1791 1803 self.ui.status(_('%d files to transfer, %s of data\n') %
1792 1804 (total_files, util.bytecount(total_bytes)))
1793 1805 start = time.time()
1794 1806 for i in xrange(total_files):
1795 1807 name, size = fp.readline().split('\0', 1)
1796 1808 size = int(size)
1797 1809 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1798 1810 ofp = self.sopener(name, 'w')
1799 1811 for chunk in util.filechunkiter(fp, limit=size):
1800 1812 ofp.write(chunk)
1801 1813 ofp.close()
1802 1814 elapsed = time.time() - start
1803 1815 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1804 1816 (util.bytecount(total_bytes), elapsed,
1805 1817 util.bytecount(total_bytes / elapsed)))
1806 1818 self.reload()
1807 1819 return len(self.heads()) + 1
1808 1820
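# The 'stream' wire format consumed by stream_in above (a hedged
# reconstruction from the parsing code):
#
#   "0\n"                          status; non-zero aborts
#   "<filecount> <bytecount>\n"    totals for progress reporting
#   then, per file:
#   "<store path>\0<size>\n"       followed by exactly <size> raw bytes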
1809 1821 def clone(self, remote, heads=[], stream=False):
1810 1822 '''clone remote repository.
1811 1823
1812 1824 keyword arguments:
1813 1825 heads: list of revs to clone (forces use of pull)
1814 1826 stream: use streaming clone if possible'''
1815 1827
1816 1828 # now, all clients that can request uncompressed clones can
1817 1829 # read repo formats supported by all servers that can serve
1818 1830 # them.
1819 1831
1820 1832 # if revlog format changes, client will have to check version
1821 1833 # and format flags on "stream" capability, and use
1822 1834 # uncompressed only if compatible.
1823 1835
1824 1836 if stream and not heads and remote.capable('stream'):
1825 1837 return self.stream_in(remote)
1826 1838 return self.pull(remote, heads)
1827 1839
1828 1840 # used to avoid circular references so destructors work
1829 1841 def aftertrans(base):
1830 1842 p = base
1831 1843 def a():
1832 1844 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1833 1845 util.rename(os.path.join(p, "journal.dirstate"),
1834 1846 os.path.join(p, "undo.dirstate"))
1835 1847 return a
1836 1848
1837 1849 def instance(ui, path, create):
1838 1850 return localrepository(ui, util.drop_scheme('file', path), create)
1839 1851
1840 1852 def islocal(path):
1841 1853 return True