Use revlogng and inlined data files by default...
mason@suse.com
r2222:c9e264b1 default
@@ -1,108 +1,108 @@
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os
9 9 from revlog import *
10 10 from demandload import *
11 11 demandload(globals(), "bdiff")
12 12
13 13 class filelog(revlog):
14 def __init__(self, opener, path, defversion=0):
14 def __init__(self, opener, path, defversion=REVLOG_DEFAULT_VERSION):
15 15 revlog.__init__(self, opener,
16 16 os.path.join("data", self.encodedir(path + ".i")),
17 17 os.path.join("data", self.encodedir(path + ".d")),
18 18 defversion)
19 19
20 20 # This avoids a collision between a file named foo and a dir named
21 21 # foo.i or foo.d
22 22 def encodedir(self, path):
23 23 return (path
24 24 .replace(".hg/", ".hg.hg/")
25 25 .replace(".i/", ".i.hg/")
26 26 .replace(".d/", ".d.hg/"))
27 27
28 28 def decodedir(self, path):
29 29 return (path
30 30 .replace(".d.hg/", ".d/")
31 31 .replace(".i.hg/", ".i/")
32 32 .replace(".hg.hg/", ".hg/"))
33 33
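
A quick standalone check of the escaping above (duplicating the two methods, since they are plain string rewrites) shows that a path with ".i"/".d" directory components survives the round trip:

    def encodedir(path):
        # escape directory components that would collide with revlog files
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(path):
        # exact inverse of encodedir
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    p = "foo.i/bar.d/baz"
    assert encodedir(p) == "foo.i.hg/bar.d.hg/baz"
    assert decodedir(encodedir(p)) == p
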
34 34 def read(self, node):
35 35 t = self.revision(node)
36 36 if not t.startswith('\1\n'):
37 37 return t
38 38 s = t.find('\1\n', 2)
39 39 return t[s+2:]
40 40
41 41 def readmeta(self, node):
42 42 t = self.revision(node)
43 43 if not t.startswith('\1\n'):
44 44 return {}
45 45 s = t.find('\1\n', 2)
46 46 mt = t[2:s]
47 47 m = {}
48 48 for l in mt.splitlines():
49 49 k, v = l.split(": ", 1)
50 50 m[k] = v
51 51 return m
52 52
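
The '\1\n' envelope that read() and readmeta() peel apart is easiest to see on a concrete revision text; this sketch (hypothetical values) builds one the way add() below does and parses it back:

    meta = {"copy": "a.txt", "copyrev": "0" * 40}
    body = "file contents\n"
    mt = "".join(["%s: %s\n" % (k, v) for k, v in meta.items()])
    text = "\1\n%s\1\n%s" % (mt, body)     # what the filelog stores

    # readmeta()'s parse, inlined:
    s = text.find('\1\n', 2)
    parsed = dict(l.split(": ", 1) for l in text[2:s].splitlines())
    assert parsed == meta
    assert text[s + 2:] == body            # read() returns just the body
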
53 53 def add(self, text, meta, transaction, link, p1=None, p2=None):
54 54 if meta or text.startswith('\1\n'):
55 55 mt = ""
56 56 if meta:
57 57 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
58 58 text = "\1\n%s\1\n%s" % ("".join(mt), text)
59 59 return self.addrevision(text, transaction, link, p1, p2)
60 60
61 61 def renamed(self, node):
62 62 if self.parents(node)[0] != nullid:
63 63 return False
64 64 m = self.readmeta(node)
65 65 if m and m.has_key("copy"):
66 66 return (m["copy"], bin(m["copyrev"]))
67 67 return False
68 68
69 69 def annotate(self, node):
70 70
71 71 def decorate(text, rev):
72 72 return ([rev] * len(text.splitlines()), text)
73 73
74 74 def pair(parent, child):
75 75 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
76 76 child[0][b1:b2] = parent[0][a1:a2]
77 77 return child
78 78
79 79 # find all ancestors
80 80 needed = {node:1}
81 81 visit = [node]
82 82 while visit:
83 83 n = visit.pop(0)
84 84 for p in self.parents(n):
85 85 if p not in needed:
86 86 needed[p] = 1
87 87 visit.append(p)
88 88 else:
89 89 # count how many times we'll use this
90 90 needed[p] += 1
91 91
92 92 # sort by revision which is a topological order
93 93 visit = [ (self.rev(n), n) for n in needed.keys() ]
94 94 visit.sort()
95 95 hist = {}
96 96
97 97 for r,n in visit:
98 98 curr = decorate(self.read(n), self.linkrev(n))
99 99 for p in self.parents(n):
100 100 if p != nullid:
101 101 curr = pair(hist[p], curr)
102 102 # trim the history of unneeded revs
103 103 needed[p] -= 1
104 104 if not needed[p]:
105 105 del hist[p]
106 106 hist[n] = curr
107 107
108 108 return zip(hist[n][0], hist[n][1].splitlines(1))
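
annotate() tags every line of a revision with its linkrev, then lets matching blocks from each parent overwrite the child's tags. A minimal standalone sketch of the same decorate/pair scheme, with difflib.SequenceMatcher standing in for bdiff.blocks:

    import difflib

    def decorate(text, rev):
        return ([rev] * len(text.splitlines()), text)

    def pair(parent, child):
        # matching blocks keep the parent's annotations
        sm = difflib.SequenceMatcher(None, parent[1].splitlines(),
                                     child[1].splitlines())
        for a1, b1, n in sm.get_matching_blocks():
            child[0][b1:b1 + n] = parent[0][a1:a1 + n]
        return child

    parent = decorate("a\nb\nc\n", 0)
    child = pair(parent, decorate("a\nx\nc\n", 1))
    assert child[0] == [0, 1, 0]    # only the changed line keeps rev 1
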
@@ -1,2072 +1,2076 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog traceback")
16 16
17 17 class localrepository(object):
18 18 def __del__(self):
19 19 self.transhandle = None
20 20 def __init__(self, parentui, path=None, create=0):
21 21 if not path:
22 22 p = os.getcwd()
23 23 while not os.path.isdir(os.path.join(p, ".hg")):
24 24 oldp = p
25 25 p = os.path.dirname(p)
26 26 if p == oldp:
27 27 raise repo.RepoError(_("no repo found"))
28 28 path = p
29 29 self.path = os.path.join(path, ".hg")
30 30
31 31 if not create and not os.path.isdir(self.path):
32 32 raise repo.RepoError(_("repository %s not found") % path)
33 33
34 34 self.root = os.path.abspath(path)
35 35 self.origroot = path
36 36 self.ui = ui.ui(parentui=parentui)
37 37 self.opener = util.opener(self.path)
38 38 self.wopener = util.opener(self.root)
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 except IOError:
43 43 pass
44 44
45 45 v = self.ui.revlogopts
46 self.revlogversion = int(v.get('format', revlog.REVLOGV0))
46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 fl = v.get('flags', None)
48 49 flags = 0
49 for x in v.get('flags', "").split():
50 if fl != None:
51 for x in fl.split():
50 52 flags |= revlog.flagstr(x)
53 elif self.revlogv1:
54 flags = revlog.REVLOG_DEFAULT_FLAGS
51 55
52 56 v = self.revlogversion | flags
53 57 self.manifest = manifest.manifest(self.opener, v)
54 58 self.changelog = changelog.changelog(self.opener, v)
55 59
56 60 # the changelog might not have the inline index flag
57 61 # on. If the format of the changelog is the same as found in
58 62 # .hgrc, apply any flags found in the .hgrc as well.
59 63 # Otherwise, just use the version from the changelog.
60 64 v = self.changelog.version
61 65 if v == self.revlogversion:
62 66 v |= flags
63 67 self.revlogversion = v
64 68
65 69 self.tagscache = None
66 70 self.nodetagscache = None
67 71 self.encodepats = None
68 72 self.decodepats = None
69 73 self.transhandle = None
70 74
71 75 if create:
72 76 os.mkdir(self.path)
73 77 os.mkdir(self.join("data"))
74 78
75 79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
76 80
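
The revlog options parsed near the top of __init__ come from the repository's hgrc (via ui.revlogopts); assuming the [revlog] section and key names this parsing consumes, opting back out of the new defaults would look something like:

    [revlog]
    # hypothetical values: 'format' and 'flags' are the keys parsed above,
    # 'inline' being the flag name revlog.flagstr() understands
    format = 0
    # or keep the new format but spell the flags out:
    # format = 1
    # flags = inline
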
77 81 def hook(self, name, throw=False, **args):
78 82 def callhook(hname, funcname):
79 83 '''call python hook. hook is callable object, looked up as
80 84 name in python module. if callable returns "true", hook
81 85 fails, else passes. if hook raises exception, treated as
82 86 hook failure. exception propagates if throw is "true".
83 87
84 88 reason for "true" meaning "hook failed" is so that
85 89 unmodified commands (e.g. mercurial.commands.update) can
86 90 be run as hooks without wrappers to convert return values.'''
87 91
88 92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
89 93 d = funcname.rfind('.')
90 94 if d == -1:
91 95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
92 96 % (hname, funcname))
93 97 modname = funcname[:d]
94 98 try:
95 99 obj = __import__(modname)
96 100 except ImportError:
97 101 raise util.Abort(_('%s hook is invalid '
98 102 '(import of "%s" failed)') %
99 103 (hname, modname))
100 104 try:
101 105 for p in funcname.split('.')[1:]:
102 106 obj = getattr(obj, p)
103 107 except AttributeError, err:
104 108 raise util.Abort(_('%s hook is invalid '
105 109 '("%s" is not defined)') %
106 110 (hname, funcname))
107 111 if not callable(obj):
108 112 raise util.Abort(_('%s hook is invalid '
109 113 '("%s" is not callable)') %
110 114 (hname, funcname))
111 115 try:
112 116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
113 117 except (KeyboardInterrupt, util.SignalInterrupt):
114 118 raise
115 119 except Exception, exc:
116 120 if isinstance(exc, util.Abort):
117 121 self.ui.warn(_('error: %s hook failed: %s\n') %
118 122 (hname, exc.args[0] % exc.args[1:]))
119 123 else:
120 124 self.ui.warn(_('error: %s hook raised an exception: '
121 125 '%s\n') % (hname, exc))
122 126 if throw:
123 127 raise
124 128 if self.ui.traceback:
125 129 traceback.print_exc()
126 130 return True
127 131 if r:
128 132 if throw:
129 133 raise util.Abort(_('%s hook failed') % hname)
130 134 self.ui.warn(_('warning: %s hook failed\n') % hname)
131 135 return r
132 136
133 137 def runhook(name, cmd):
134 138 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
135 139 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
136 140 [(k.upper(), v) for k, v in args.iteritems()])
137 141 r = util.system(cmd, environ=env, cwd=self.root)
138 142 if r:
139 143 desc, r = util.explain_exit(r)
140 144 if throw:
141 145 raise util.Abort(_('%s hook %s') % (name, desc))
142 146 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
143 147 return r
144 148
145 149 r = False
146 150 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
147 151 if hname.split(".", 1)[0] == name and cmd]
148 152 hooks.sort()
149 153 for hname, cmd in hooks:
150 154 if cmd.startswith('python:'):
151 155 r = callhook(hname, cmd[7:].strip()) or r
152 156 else:
153 157 r = runhook(hname, cmd) or r
154 158 return r
155 159
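
Hooks come from the [hooks] section; a 'python:' prefix routes to callhook(), anything else to runhook(). A sketch of an in-process hook module this dispatcher could import, wired up as e.g. 'pretxncommit.check = python:mymod.check' (mymod and check are hypothetical names):

    # mymod.py -- note the inverted convention documented above:
    # a true return value means the hook FAILED.
    def check(ui, repo, hooktype, node=None, parent1=None, parent2=None,
              **kwargs):
        ui.note("checking changeset %s\n" % node)
        return False    # falsy: hook passes, the commit proceeds
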
156 160 def tags(self):
157 161 '''return a mapping of tag to node'''
158 162 if not self.tagscache:
159 163 self.tagscache = {}
160 164
161 165 def parsetag(line, context):
162 166 if not line:
163 167 return
164 168 s = line.split(" ", 1)
165 169 if len(s) != 2:
166 170 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
167 171 return
168 172 node, key = s
169 173 try:
170 174 bin_n = bin(node)
171 175 except TypeError:
172 176 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
173 177 return
174 178 if bin_n not in self.changelog.nodemap:
175 179 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
176 180 return
177 181 self.tagscache[key.strip()] = bin_n
178 182
179 183 # read each head of the tags file, ending with the tip
180 184 # and add each tag found to the map, with "newer" ones
181 185 # taking precedence
182 186 fl = self.file(".hgtags")
183 187 h = fl.heads()
184 188 h.reverse()
185 189 for r in h:
186 190 count = 0
187 191 for l in fl.read(r).splitlines():
188 192 count += 1
189 193 parsetag(l, ".hgtags:%d" % count)
190 194
191 195 try:
192 196 f = self.opener("localtags")
193 197 count = 0
194 198 for l in f:
195 199 count += 1
196 200 parsetag(l, "localtags:%d" % count)
197 201 except IOError:
198 202 pass
199 203
200 204 self.tagscache['tip'] = self.changelog.tip()
201 205
202 206 return self.tagscache
203 207
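
parsetag() expects each .hgtags or localtags line to be a 40-digit hex node, a space, and the tag name. A standalone sketch of its split-and-validate step (binascii.unhexlify standing in for node.bin):

    from binascii import unhexlify as bin

    def parse(line):
        s = line.split(" ", 1)
        if len(s) != 2:
            return None                     # "ignoring invalid tag"
        node, key = s
        try:
            return key.strip(), bin(node)   # tag name, 20-byte node
        except TypeError:
            return None

    good = "0123456789abcdef0123456789abcdef01234567 v1.0"
    assert parse(good) == ("v1.0", bin(good.split()[0]))
    assert parse("garbage") is None
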
204 208 def tagslist(self):
205 209 '''return a list of tags ordered by revision'''
206 210 l = []
207 211 for t, n in self.tags().items():
208 212 try:
209 213 r = self.changelog.rev(n)
210 214 except:
211 215 r = -2 # sort to the beginning of the list if unknown
212 216 l.append((r, t, n))
213 217 l.sort()
214 218 return [(t, n) for r, t, n in l]
215 219
216 220 def nodetags(self, node):
217 221 '''return the tags associated with a node'''
218 222 if not self.nodetagscache:
219 223 self.nodetagscache = {}
220 224 for t, n in self.tags().items():
221 225 self.nodetagscache.setdefault(n, []).append(t)
222 226 return self.nodetagscache.get(node, [])
223 227
224 228 def lookup(self, key):
225 229 try:
226 230 return self.tags()[key]
227 231 except KeyError:
228 232 try:
229 233 return self.changelog.lookup(key)
230 234 except:
231 235 raise repo.RepoError(_("unknown revision '%s'") % key)
232 236
233 237 def dev(self):
234 238 return os.stat(self.path).st_dev
235 239
236 240 def local(self):
237 241 return True
238 242
239 243 def join(self, f):
240 244 return os.path.join(self.path, f)
241 245
242 246 def wjoin(self, f):
243 247 return os.path.join(self.root, f)
244 248
245 249 def file(self, f):
246 250 if f[0] == '/':
247 251 f = f[1:]
248 252 return filelog.filelog(self.opener, f, self.revlogversion)
249 253
250 254 def getcwd(self):
251 255 return self.dirstate.getcwd()
252 256
253 257 def wfile(self, f, mode='r'):
254 258 return self.wopener(f, mode)
255 259
256 260 def wread(self, filename):
257 261 if self.encodepats == None:
258 262 l = []
259 263 for pat, cmd in self.ui.configitems("encode"):
260 264 mf = util.matcher(self.root, "", [pat], [], [])[1]
261 265 l.append((mf, cmd))
262 266 self.encodepats = l
263 267
264 268 data = self.wopener(filename, 'r').read()
265 269
266 270 for mf, cmd in self.encodepats:
267 271 if mf(filename):
268 272 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
269 273 data = util.filter(data, cmd)
270 274 break
271 275
272 276 return data
273 277
274 278 def wwrite(self, filename, data, fd=None):
275 279 if self.decodepats == None:
276 280 l = []
277 281 for pat, cmd in self.ui.configitems("decode"):
278 282 mf = util.matcher(self.root, "", [pat], [], [])[1]
279 283 l.append((mf, cmd))
280 284 self.decodepats = l
281 285
282 286 for mf, cmd in self.decodepats:
283 287 if mf(filename):
284 288 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
285 289 data = util.filter(data, cmd)
286 290 break
287 291
288 292 if fd:
289 293 return fd.write(data)
290 294 return self.wopener(filename, 'w').write(data)
291 295
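
wread() pushes working-directory data through the [encode] filters and wwrite() through the [decode] filters, so the repository stores the encoded form. A hypothetical pair that keeps .gz files compressed in the working directory while versioning them uncompressed:

    [encode]
    **.gz = gunzip    # applied by wread(): working dir -> repository
    [decode]
    **.gz = gzip      # applied by wwrite(): repository -> working dir
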
292 296 def transaction(self):
293 297 tr = self.transhandle
294 298 if tr != None and tr.running():
295 299 return tr.nest()
296 300
297 301 # save dirstate for undo
298 302 try:
299 303 ds = self.opener("dirstate").read()
300 304 except IOError:
301 305 ds = ""
302 306 self.opener("journal.dirstate", "w").write(ds)
303 307
304 308 tr = transaction.transaction(self.ui.warn, self.opener,
305 309 self.join("journal"),
306 310 aftertrans(self.path))
307 311 self.transhandle = tr
308 312 return tr
309 313
310 314 def recover(self):
311 315 l = self.lock()
312 316 if os.path.exists(self.join("journal")):
313 317 self.ui.status(_("rolling back interrupted transaction\n"))
314 318 transaction.rollback(self.opener, self.join("journal"))
315 319 self.reload()
316 320 return True
317 321 else:
318 322 self.ui.warn(_("no interrupted transaction available\n"))
319 323 return False
320 324
321 325 def undo(self, wlock=None):
322 326 if not wlock:
323 327 wlock = self.wlock()
324 328 l = self.lock()
325 329 if os.path.exists(self.join("undo")):
326 330 self.ui.status(_("rolling back last transaction\n"))
327 331 transaction.rollback(self.opener, self.join("undo"))
328 332 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
329 333 self.reload()
330 334 self.wreload()
331 335 else:
332 336 self.ui.warn(_("no undo information available\n"))
333 337
334 338 def wreload(self):
335 339 self.dirstate.read()
336 340
337 341 def reload(self):
338 342 self.changelog.load()
339 343 self.manifest.load()
340 344 self.tagscache = None
341 345 self.nodetagscache = None
342 346
343 347 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
344 348 desc=None):
345 349 try:
346 350 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
347 351 except lock.LockHeld, inst:
348 352 if not wait:
349 353 raise
350 354 self.ui.warn(_("waiting for lock on %s held by %s\n") %
351 355 (desc, inst.args[0]))
352 356 # default to 600 seconds timeout
353 357 l = lock.lock(self.join(lockname),
354 358 int(self.ui.config("ui", "timeout") or 600),
355 359 releasefn, desc=desc)
356 360 if acquirefn:
357 361 acquirefn()
358 362 return l
359 363
360 364 def lock(self, wait=1):
361 365 return self.do_lock("lock", wait, acquirefn=self.reload,
362 366 desc=_('repository %s') % self.origroot)
363 367
364 368 def wlock(self, wait=1):
365 369 return self.do_lock("wlock", wait, self.dirstate.write,
366 370 self.wreload,
367 371 desc=_('working directory of %s') % self.origroot)
368 372
369 373 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
370 374 "determine whether a new filenode is needed"
371 375 fp1 = manifest1.get(filename, nullid)
372 376 fp2 = manifest2.get(filename, nullid)
373 377
374 378 if fp2 != nullid:
375 379 # is one parent an ancestor of the other?
376 380 fpa = filelog.ancestor(fp1, fp2)
377 381 if fpa == fp1:
378 382 fp1, fp2 = fp2, nullid
379 383 elif fpa == fp2:
380 384 fp2 = nullid
381 385
382 386 # is the file unmodified from the parent? report existing entry
383 387 if fp2 == nullid and text == filelog.read(fp1):
384 388 return (fp1, None, None)
385 389
386 390 return (None, fp1, fp2)
387 391
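
A standalone sketch of the decision above, with fpa and read passed in as stand-ins for filelog.ancestor() and filelog.read(), makes the three outcomes concrete:

    nullid = "\0" * 20

    def checkfilemerge(fp1, fp2, fpa, text, read):
        if fp2 != nullid:
            # one parent an ancestor of the other? collapse to one parent
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid
        if fp2 == nullid and text == read(fp1):
            return (fp1, None, None)    # unmodified: reuse existing node
        return (None, fp1, fp2)         # a new filenode is needed

    n1 = "n1"
    assert checkfilemerge(n1, nullid, nullid, "x", lambda n: "x") == (n1, None, None)
    assert checkfilemerge(n1, nullid, nullid, "y", lambda n: "x") == (None, n1, nullid)
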
388 392 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
389 393 orig_parent = self.dirstate.parents()[0] or nullid
390 394 p1 = p1 or self.dirstate.parents()[0] or nullid
391 395 p2 = p2 or self.dirstate.parents()[1] or nullid
392 396 c1 = self.changelog.read(p1)
393 397 c2 = self.changelog.read(p2)
394 398 m1 = self.manifest.read(c1[0])
395 399 mf1 = self.manifest.readflags(c1[0])
396 400 m2 = self.manifest.read(c2[0])
397 401 changed = []
398 402
399 403 if orig_parent == p1:
400 404 update_dirstate = 1
401 405 else:
402 406 update_dirstate = 0
403 407
404 408 if not wlock:
405 409 wlock = self.wlock()
406 410 l = self.lock()
407 411 tr = self.transaction()
408 412 mm = m1.copy()
409 413 mfm = mf1.copy()
410 414 linkrev = self.changelog.count()
411 415 for f in files:
412 416 try:
413 417 t = self.wread(f)
414 418 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
415 419 r = self.file(f)
416 420 mfm[f] = tm
417 421
418 422 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
419 423 if entry:
420 424 mm[f] = entry
421 425 continue
422 426
423 427 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
424 428 changed.append(f)
425 429 if update_dirstate:
426 430 self.dirstate.update([f], "n")
427 431 except IOError:
428 432 try:
429 433 del mm[f]
430 434 del mfm[f]
431 435 if update_dirstate:
432 436 self.dirstate.forget([f])
433 437 except:
434 438 # deleted from p2?
435 439 pass
436 440
437 441 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
438 442 user = user or self.ui.username()
439 443 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
440 444 tr.close()
441 445 if update_dirstate:
442 446 self.dirstate.setparents(n, nullid)
443 447
444 448 def commit(self, files=None, text="", user=None, date=None,
445 449 match=util.always, force=False, lock=None, wlock=None):
446 450 commit = []
447 451 remove = []
448 452 changed = []
449 453
450 454 if files:
451 455 for f in files:
452 456 s = self.dirstate.state(f)
453 457 if s in 'nmai':
454 458 commit.append(f)
455 459 elif s == 'r':
456 460 remove.append(f)
457 461 else:
458 462 self.ui.warn(_("%s not tracked!\n") % f)
459 463 else:
460 464 modified, added, removed, deleted, unknown = self.changes(match=match)
461 465 commit = modified + added
462 466 remove = removed
463 467
464 468 p1, p2 = self.dirstate.parents()
465 469 c1 = self.changelog.read(p1)
466 470 c2 = self.changelog.read(p2)
467 471 m1 = self.manifest.read(c1[0])
468 472 mf1 = self.manifest.readflags(c1[0])
469 473 m2 = self.manifest.read(c2[0])
470 474
471 475 if not commit and not remove and not force and p2 == nullid:
472 476 self.ui.status(_("nothing changed\n"))
473 477 return None
474 478
475 479 xp1 = hex(p1)
476 480 if p2 == nullid: xp2 = ''
477 481 else: xp2 = hex(p2)
478 482
479 483 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
480 484
481 485 if not wlock:
482 486 wlock = self.wlock()
483 487 if not lock:
484 488 lock = self.lock()
485 489 tr = self.transaction()
486 490
487 491 # check in files
488 492 new = {}
489 493 linkrev = self.changelog.count()
490 494 commit.sort()
491 495 for f in commit:
492 496 self.ui.note(f + "\n")
493 497 try:
494 498 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
495 499 t = self.wread(f)
496 500 except IOError:
497 501 self.ui.warn(_("trouble committing %s!\n") % f)
498 502 raise
499 503
500 504 r = self.file(f)
501 505
502 506 meta = {}
503 507 cp = self.dirstate.copied(f)
504 508 if cp:
505 509 meta["copy"] = cp
506 510 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
507 511 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
508 512 fp1, fp2 = nullid, nullid
509 513 else:
510 514 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
511 515 if entry:
512 516 new[f] = entry
513 517 continue
514 518
515 519 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
516 520 # remember what we've added so that we can later calculate
517 521 # the files to pull from a set of changesets
518 522 changed.append(f)
519 523
520 524 # update manifest
521 525 m1 = m1.copy()
522 526 m1.update(new)
523 527 for f in remove:
524 528 if f in m1:
525 529 del m1[f]
526 530 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
527 531 (new, remove))
528 532
529 533 # add changeset
530 534 new = new.keys()
531 535 new.sort()
532 536
533 537 user = user or self.ui.username()
534 538 if not text:
535 539 edittext = [""]
536 540 if p2 != nullid:
537 541 edittext.append("HG: branch merge")
538 542 edittext.extend(["HG: changed %s" % f for f in changed])
539 543 edittext.extend(["HG: removed %s" % f for f in remove])
540 544 if not changed and not remove:
541 545 edittext.append("HG: no files changed")
542 546 edittext.append("")
543 547 # run editor in the repository root
544 548 olddir = os.getcwd()
545 549 os.chdir(self.root)
546 550 edittext = self.ui.edit("\n".join(edittext), user)
547 551 os.chdir(olddir)
548 552 if not edittext.rstrip():
549 553 return None
550 554 text = edittext
551 555
552 556 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
553 557 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
554 558 parent2=xp2)
555 559 tr.close()
556 560
557 561 self.dirstate.setparents(n)
558 562 self.dirstate.update(new, "n")
559 563 self.dirstate.forget(remove)
560 564
561 565 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
562 566 return n
563 567
564 568 def walk(self, node=None, files=[], match=util.always, badmatch=None):
565 569 if node:
566 570 fdict = dict.fromkeys(files)
567 571 for fn in self.manifest.read(self.changelog.read(node)[0]):
568 572 fdict.pop(fn, None)
569 573 if match(fn):
570 574 yield 'm', fn
571 575 for fn in fdict:
572 576 if badmatch and badmatch(fn):
573 577 if match(fn):
574 578 yield 'b', fn
575 579 else:
576 580 self.ui.warn(_('%s: No such file in rev %s\n') % (
577 581 util.pathto(self.getcwd(), fn), short(node)))
578 582 else:
579 583 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
580 584 yield src, fn
581 585
582 586 def changes(self, node1=None, node2=None, files=[], match=util.always,
583 587 wlock=None, show_ignored=None):
584 588 """return changes between two nodes or node and working directory
585 589
586 590 If node1 is None, use the first dirstate parent instead.
587 591 If node2 is None, compare node1 with working directory.
588 592 """
589 593
590 594 def fcmp(fn, mf):
591 595 t1 = self.wread(fn)
592 596 t2 = self.file(fn).read(mf.get(fn, nullid))
593 597 return cmp(t1, t2)
594 598
595 599 def mfmatches(node):
596 600 change = self.changelog.read(node)
597 601 mf = dict(self.manifest.read(change[0]))
598 602 for fn in mf.keys():
599 603 if not match(fn):
600 604 del mf[fn]
601 605 return mf
602 606
603 607 if node1:
604 608 # read the manifest from node1 before the manifest from node2,
605 609 # so that we'll hit the manifest cache if we're going through
606 610 # all the revisions in parent->child order.
607 611 mf1 = mfmatches(node1)
608 612
609 613 # are we comparing the working directory?
610 614 if not node2:
611 615 if not wlock:
612 616 try:
613 617 wlock = self.wlock(wait=0)
614 618 except lock.LockException:
615 619 wlock = None
616 620 lookup, modified, added, removed, deleted, unknown, ignored = (
617 621 self.dirstate.changes(files, match, show_ignored))
618 622
619 623 # are we comparing working dir against its parent?
620 624 if not node1:
621 625 if lookup:
622 626 # do a full compare of any files that might have changed
623 627 mf2 = mfmatches(self.dirstate.parents()[0])
624 628 for f in lookup:
625 629 if fcmp(f, mf2):
626 630 modified.append(f)
627 631 elif wlock is not None:
628 632 self.dirstate.update([f], "n")
629 633 else:
630 634 # we are comparing working dir against non-parent
631 635 # generate a pseudo-manifest for the working dir
632 636 mf2 = mfmatches(self.dirstate.parents()[0])
633 637 for f in lookup + modified + added:
634 638 mf2[f] = ""
635 639 for f in removed:
636 640 if f in mf2:
637 641 del mf2[f]
638 642 else:
639 643 # we are comparing two revisions
640 644 deleted, unknown, ignored = [], [], []
641 645 mf2 = mfmatches(node2)
642 646
643 647 if node1:
644 648 # flush lists from dirstate before comparing manifests
645 649 modified, added = [], []
646 650
647 651 for fn in mf2:
648 652 if mf1.has_key(fn):
649 653 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
650 654 modified.append(fn)
651 655 del mf1[fn]
652 656 else:
653 657 added.append(fn)
654 658
655 659 removed = mf1.keys()
656 660
657 661 # sort and return results:
658 662 for l in modified, added, removed, deleted, unknown, ignored:
659 663 l.sort()
660 664 if show_ignored is None:
661 665 return (modified, added, removed, deleted, unknown)
662 666 else:
663 667 return (modified, added, removed, deleted, unknown, ignored)
664 668
665 669 def add(self, list, wlock=None):
666 670 if not wlock:
667 671 wlock = self.wlock()
668 672 for f in list:
669 673 p = self.wjoin(f)
670 674 if not os.path.exists(p):
671 675 self.ui.warn(_("%s does not exist!\n") % f)
672 676 elif not os.path.isfile(p):
673 677 self.ui.warn(_("%s not added: only files supported currently\n")
674 678 % f)
675 679 elif self.dirstate.state(f) in 'an':
676 680 self.ui.warn(_("%s already tracked!\n") % f)
677 681 else:
678 682 self.dirstate.update([f], "a")
679 683
680 684 def forget(self, list, wlock=None):
681 685 if not wlock:
682 686 wlock = self.wlock()
683 687 for f in list:
684 688 if self.dirstate.state(f) not in 'ai':
685 689 self.ui.warn(_("%s not added!\n") % f)
686 690 else:
687 691 self.dirstate.forget([f])
688 692
689 693 def remove(self, list, unlink=False, wlock=None):
690 694 if unlink:
691 695 for f in list:
692 696 try:
693 697 util.unlink(self.wjoin(f))
694 698 except OSError, inst:
695 699 if inst.errno != errno.ENOENT:
696 700 raise
697 701 if not wlock:
698 702 wlock = self.wlock()
699 703 for f in list:
700 704 p = self.wjoin(f)
701 705 if os.path.exists(p):
702 706 self.ui.warn(_("%s still exists!\n") % f)
703 707 elif self.dirstate.state(f) == 'a':
704 708 self.dirstate.forget([f])
705 709 elif f not in self.dirstate:
706 710 self.ui.warn(_("%s not tracked!\n") % f)
707 711 else:
708 712 self.dirstate.update([f], "r")
709 713
710 714 def undelete(self, list, wlock=None):
711 715 p = self.dirstate.parents()[0]
712 716 mn = self.changelog.read(p)[0]
713 717 mf = self.manifest.readflags(mn)
714 718 m = self.manifest.read(mn)
715 719 if not wlock:
716 720 wlock = self.wlock()
717 721 for f in list:
718 722 if self.dirstate.state(f) not in "r":
719 723 self.ui.warn(_("%s not removed!\n") % f)
720 724 else:
721 725 t = self.file(f).read(m[f])
722 726 self.wwrite(f, t)
723 727 util.set_exec(self.wjoin(f), mf[f])
724 728 self.dirstate.update([f], "n")
725 729
726 730 def copy(self, source, dest, wlock=None):
727 731 p = self.wjoin(dest)
728 732 if not os.path.exists(p):
729 733 self.ui.warn(_("%s does not exist!\n") % dest)
730 734 elif not os.path.isfile(p):
731 735 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
732 736 else:
733 737 if not wlock:
734 738 wlock = self.wlock()
735 739 if self.dirstate.state(dest) == '?':
736 740 self.dirstate.update([dest], "a")
737 741 self.dirstate.copy(source, dest)
738 742
739 743 def heads(self, start=None):
740 744 heads = self.changelog.heads(start)
741 745 # sort the output in rev descending order
742 746 heads = [(-self.changelog.rev(h), h) for h in heads]
743 747 heads.sort()
744 748 return [n for (r, n) in heads]
745 749
746 750 # branchlookup returns a dict giving a list of branches for
747 751 # each head. A branch is defined as the tag of a node or
748 752 # the branch of the node's parents. If a node has multiple
749 753 # branch tags, tags are eliminated if they are visible from other
750 754 # branch tags.
751 755 #
752 756 # So, for this graph: a->b->c->d->e
753 757 # \ /
754 758 # aa -----/
755 759 # a has tag 2.6.12
756 760 # d has tag 2.6.13
757 761 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
758 762 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
759 763 # from the list.
760 764 #
761 765 # It is possible that more than one head will have the same branch tag.
762 766 # callers need to check the result for multiple heads under the same
763 767 # branch tag if that is a problem for them (ie checkout of a specific
764 768 # branch).
765 769 #
766 770 # passing in a specific branch will limit the depth of the search
767 771 # through the parents. It won't limit the branches returned in the
768 772 # result though.
769 773 def branchlookup(self, heads=None, branch=None):
770 774 if not heads:
771 775 heads = self.heads()
772 776 headt = [ h for h in heads ]
773 777 chlog = self.changelog
774 778 branches = {}
775 779 merges = []
776 780 seenmerge = {}
777 781
778 782 # traverse the tree once for each head, recording in the branches
779 783 # dict which tags are visible from this head. The branches
780 784 # dict also records which tags are visible from each tag
781 785 # while we traverse.
782 786 while headt or merges:
783 787 if merges:
784 788 n, found = merges.pop()
785 789 visit = [n]
786 790 else:
787 791 h = headt.pop()
788 792 visit = [h]
789 793 found = [h]
790 794 seen = {}
791 795 while visit:
792 796 n = visit.pop()
793 797 if n in seen:
794 798 continue
795 799 pp = chlog.parents(n)
796 800 tags = self.nodetags(n)
797 801 if tags:
798 802 for x in tags:
799 803 if x == 'tip':
800 804 continue
801 805 for f in found:
802 806 branches.setdefault(f, {})[n] = 1
803 807 branches.setdefault(n, {})[n] = 1
804 808 break
805 809 if n not in found:
806 810 found.append(n)
807 811 if branch in tags:
808 812 continue
809 813 seen[n] = 1
810 814 if pp[1] != nullid and n not in seenmerge:
811 815 merges.append((pp[1], [x for x in found]))
812 816 seenmerge[n] = 1
813 817 if pp[0] != nullid:
814 818 visit.append(pp[0])
815 819 # traverse the branches dict, eliminating branch tags from each
816 820 # head that are visible from another branch tag for that head.
817 821 out = {}
818 822 viscache = {}
819 823 for h in heads:
820 824 def visible(node):
821 825 if node in viscache:
822 826 return viscache[node]
823 827 ret = {}
824 828 visit = [node]
825 829 while visit:
826 830 x = visit.pop()
827 831 if x in viscache:
828 832 ret.update(viscache[x])
829 833 elif x not in ret:
830 834 ret[x] = 1
831 835 if x in branches:
832 836 visit[len(visit):] = branches[x].keys()
833 837 viscache[node] = ret
834 838 return ret
835 839 if h not in branches:
836 840 continue
837 841 # O(n^2), but somewhat limited. This only searches the
838 842 # tags visible from a specific head, not all the tags in the
839 843 # whole repo.
840 844 for b in branches[h]:
841 845 vis = False
842 846 for bb in branches[h].keys():
843 847 if b != bb:
844 848 if b in visible(bb):
845 849 vis = True
846 850 break
847 851 if not vis:
848 852 l = out.setdefault(h, [])
849 853 l[len(l):] = self.nodetags(b)
850 854 return out
851 855
852 856 def branches(self, nodes):
853 857 if not nodes:
854 858 nodes = [self.changelog.tip()]
855 859 b = []
856 860 for n in nodes:
857 861 t = n
858 862 while n:
859 863 p = self.changelog.parents(n)
860 864 if p[1] != nullid or p[0] == nullid:
861 865 b.append((t, n, p[0], p[1]))
862 866 break
863 867 n = p[0]
864 868 return b
865 869
866 870 def between(self, pairs):
867 871 r = []
868 872
869 873 for top, bottom in pairs:
870 874 n, l, i = top, [], 0
871 875 f = 1
872 876
873 877 while n != bottom:
874 878 p = self.changelog.parents(n)[0]
875 879 if i == f:
876 880 l.append(n)
877 881 f = f * 2
878 882 n = p
879 883 i += 1
880 884
881 885 r.append(l)
882 886
883 887 return r
884 888
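
between() walks first parents from top toward bottom, keeping a node only when its distance from top is a power of two (the i == f, f *= 2 dance), so each segment is sampled logarithmically. A standalone sketch over a linear history of integer 'nodes':

    def between(top, bottom, parent):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    # linear history 20 -> 19 -> ... -> 0: samples at distances 1, 2, 4, ...
    assert between(20, 0, lambda n: n - 1) == [19, 18, 16, 12, 4]
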
885 889 def findincoming(self, remote, base=None, heads=None, force=False):
886 890 m = self.changelog.nodemap
887 891 search = []
888 892 fetch = {}
889 893 seen = {}
890 894 seenbranch = {}
891 895 if base == None:
892 896 base = {}
893 897
894 898 if not heads:
895 899 heads = remote.heads()
896 900
897 901 if self.changelog.tip() == nullid:
898 902 if heads != [nullid]:
899 903 return [nullid]
900 904 return []
901 905
902 906 # assume we're closer to the tip than the root
903 907 # and start by examining the heads
904 908 self.ui.status(_("searching for changes\n"))
905 909
906 910 unknown = []
907 911 for h in heads:
908 912 if h not in m:
909 913 unknown.append(h)
910 914 else:
911 915 base[h] = 1
912 916
913 917 if not unknown:
914 918 return []
915 919
916 920 rep = {}
917 921 reqcnt = 0
918 922
919 923 # search through remote branches
920 924 # a 'branch' here is a linear segment of history, with four parts:
921 925 # head, root, first parent, second parent
922 926 # (a branch always has two parents (or none) by definition)
923 927 unknown = remote.branches(unknown)
924 928 while unknown:
925 929 r = []
926 930 while unknown:
927 931 n = unknown.pop(0)
928 932 if n[0] in seen:
929 933 continue
930 934
931 935 self.ui.debug(_("examining %s:%s\n")
932 936 % (short(n[0]), short(n[1])))
933 937 if n[0] == nullid:
934 938 break
935 939 if n in seenbranch:
936 940 self.ui.debug(_("branch already found\n"))
937 941 continue
938 942 if n[1] and n[1] in m: # do we know the base?
939 943 self.ui.debug(_("found incomplete branch %s:%s\n")
940 944 % (short(n[0]), short(n[1])))
941 945 search.append(n) # schedule branch range for scanning
942 946 seenbranch[n] = 1
943 947 else:
944 948 if n[1] not in seen and n[1] not in fetch:
945 949 if n[2] in m and n[3] in m:
946 950 self.ui.debug(_("found new changeset %s\n") %
947 951 short(n[1]))
948 952 fetch[n[1]] = 1 # earliest unknown
949 953 base[n[2]] = 1 # latest known
950 954 continue
951 955
952 956 for a in n[2:4]:
953 957 if a not in rep:
954 958 r.append(a)
955 959 rep[a] = 1
956 960
957 961 seen[n[0]] = 1
958 962
959 963 if r:
960 964 reqcnt += 1
961 965 self.ui.debug(_("request %d: %s\n") %
962 966 (reqcnt, " ".join(map(short, r))))
963 967 for p in range(0, len(r), 10):
964 968 for b in remote.branches(r[p:p+10]):
965 969 self.ui.debug(_("received %s:%s\n") %
966 970 (short(b[0]), short(b[1])))
967 971 if b[0] in m:
968 972 self.ui.debug(_("found base node %s\n")
969 973 % short(b[0]))
970 974 base[b[0]] = 1
971 975 elif b[0] not in seen:
972 976 unknown.append(b)
973 977
974 978 # do binary search on the branches we found
975 979 while search:
976 980 n = search.pop(0)
977 981 reqcnt += 1
978 982 l = remote.between([(n[0], n[1])])[0]
979 983 l.append(n[1])
980 984 p = n[0]
981 985 f = 1
982 986 for i in l:
983 987 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
984 988 if i in m:
985 989 if f <= 2:
986 990 self.ui.debug(_("found new branch changeset %s\n") %
987 991 short(p))
988 992 fetch[p] = 1
989 993 base[i] = 1
990 994 else:
991 995 self.ui.debug(_("narrowed branch search to %s:%s\n")
992 996 % (short(p), short(i)))
993 997 search.append((p, i))
994 998 break
995 999 p, f = i, f * 2
996 1000
997 1001 # sanity check our fetch list
998 1002 for f in fetch.keys():
999 1003 if f in m:
1000 1004 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1001 1005
1002 1006 if base.keys() == [nullid]:
1003 1007 if force:
1004 1008 self.ui.warn(_("warning: repository is unrelated\n"))
1005 1009 else:
1006 1010 raise util.Abort(_("repository is unrelated"))
1007 1011
1008 1012 self.ui.note(_("found new changesets starting at ") +
1009 1013 " ".join([short(f) for f in fetch]) + "\n")
1010 1014
1011 1015 self.ui.debug(_("%d total queries\n") % reqcnt)
1012 1016
1013 1017 return fetch.keys()
1014 1018
1015 1019 def findoutgoing(self, remote, base=None, heads=None, force=False):
1016 1020 """Return list of nodes that are roots of subsets not in remote
1017 1021
1018 1022 If base dict is specified, assume that these nodes and their parents
1019 1023 exist on the remote side.
1020 1024 If a list of heads is specified, return only nodes which are heads
1021 1025 or ancestors of these heads, and return a second element which
1022 1026 contains all remote heads which get new children.
1023 1027 """
1024 1028 if base == None:
1025 1029 base = {}
1026 1030 self.findincoming(remote, base, heads, force=force)
1027 1031
1028 1032 self.ui.debug(_("common changesets up to ")
1029 1033 + " ".join(map(short, base.keys())) + "\n")
1030 1034
1031 1035 remain = dict.fromkeys(self.changelog.nodemap)
1032 1036
1033 1037 # prune everything remote has from the tree
1034 1038 del remain[nullid]
1035 1039 remove = base.keys()
1036 1040 while remove:
1037 1041 n = remove.pop(0)
1038 1042 if n in remain:
1039 1043 del remain[n]
1040 1044 for p in self.changelog.parents(n):
1041 1045 remove.append(p)
1042 1046
1043 1047 # find every node whose parents have been pruned
1044 1048 subset = []
1045 1049 # find every remote head that will get new children
1046 1050 updated_heads = {}
1047 1051 for n in remain:
1048 1052 p1, p2 = self.changelog.parents(n)
1049 1053 if p1 not in remain and p2 not in remain:
1050 1054 subset.append(n)
1051 1055 if heads:
1052 1056 if p1 in heads:
1053 1057 updated_heads[p1] = True
1054 1058 if p2 in heads:
1055 1059 updated_heads[p2] = True
1056 1060
1057 1061 # this is the set of all roots we have to push
1058 1062 if heads:
1059 1063 return subset, updated_heads.keys()
1060 1064 else:
1061 1065 return subset
1062 1066
1063 1067 def pull(self, remote, heads=None, force=False):
1064 1068 l = self.lock()
1065 1069
1066 1070 fetch = self.findincoming(remote, force=force)
1067 1071 if fetch == [nullid]:
1068 1072 self.ui.status(_("requesting all changes\n"))
1069 1073
1070 1074 if not fetch:
1071 1075 self.ui.status(_("no changes found\n"))
1072 1076 return 0
1073 1077
1074 1078 if heads is None:
1075 1079 cg = remote.changegroup(fetch, 'pull')
1076 1080 else:
1077 1081 cg = remote.changegroupsubset(fetch, heads, 'pull')
1078 1082 return self.addchangegroup(cg)
1079 1083
1080 1084 def push(self, remote, force=False, revs=None):
1081 1085 lock = remote.lock()
1082 1086
1083 1087 base = {}
1084 1088 remote_heads = remote.heads()
1085 1089 inc = self.findincoming(remote, base, remote_heads, force=force)
1086 1090 if not force and inc:
1087 1091 self.ui.warn(_("abort: unsynced remote changes!\n"))
1088 1092 self.ui.status(_("(did you forget to sync?"
1089 1093 " use push -f to force)\n"))
1090 1094 return 1
1091 1095
1092 1096 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1093 1097 if revs is not None:
1094 1098 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1095 1099 else:
1096 1100 bases, heads = update, self.changelog.heads()
1097 1101
1098 1102 if not bases:
1099 1103 self.ui.status(_("no changes found\n"))
1100 1104 return 1
1101 1105 elif not force:
1102 1106 # FIXME we don't properly detect creation of new heads
1103 1107 # in the push -r case, assume the user knows what he's doing
1104 1108 if not revs and len(remote_heads) < len(heads) \
1105 1109 and remote_heads != [nullid]:
1106 1110 self.ui.warn(_("abort: push creates new remote branches!\n"))
1107 1111 self.ui.status(_("(did you forget to merge?"
1108 1112 " use push -f to force)\n"))
1109 1113 return 1
1110 1114
1111 1115 if revs is None:
1112 1116 cg = self.changegroup(update, 'push')
1113 1117 else:
1114 1118 cg = self.changegroupsubset(update, revs, 'push')
1115 1119 return remote.addchangegroup(cg)
1116 1120
1117 1121 def changegroupsubset(self, bases, heads, source):
1118 1122 """This function generates a changegroup consisting of all the nodes
1119 1123 that are descendants of any of the bases, and ancestors of any of
1120 1124 the heads.
1121 1125
1122 1126 It is fairly complex as determining which filenodes and which
1123 1127 manifest nodes need to be included for the changeset to be complete
1124 1128 is non-trivial.
1125 1129
1126 1130 Another wrinkle is doing the reverse, figuring out which changeset in
1127 1131 the changegroup a particular filenode or manifestnode belongs to."""
1128 1132
1129 1133 self.hook('preoutgoing', throw=True, source=source)
1130 1134
1131 1135 # Set up some initial variables
1132 1136 # Make it easy to refer to self.changelog
1133 1137 cl = self.changelog
1134 1138 # msng is short for missing - compute the list of changesets in this
1135 1139 # changegroup.
1136 1140 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1137 1141 # Some bases may turn out to be superfluous, and some heads may be
1138 1142 # too. nodesbetween will return the minimal set of bases and heads
1139 1143 # necessary to re-create the changegroup.
1140 1144
1141 1145 # Known heads are the list of heads that it is assumed the recipient
1142 1146 # of this changegroup will know about.
1143 1147 knownheads = {}
1144 1148 # We assume that all parents of bases are known heads.
1145 1149 for n in bases:
1146 1150 for p in cl.parents(n):
1147 1151 if p != nullid:
1148 1152 knownheads[p] = 1
1149 1153 knownheads = knownheads.keys()
1150 1154 if knownheads:
1151 1155 # Now that we know what heads are known, we can compute which
1152 1156 # changesets are known. The recipient must know about all
1153 1157 # changesets required to reach the known heads from the null
1154 1158 # changeset.
1155 1159 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1156 1160 junk = None
1157 1161 # Transform the list into an ersatz set.
1158 1162 has_cl_set = dict.fromkeys(has_cl_set)
1159 1163 else:
1160 1164 # If there were no known heads, the recipient cannot be assumed to
1161 1165 # know about any changesets.
1162 1166 has_cl_set = {}
1163 1167
1164 1168 # Make it easy to refer to self.manifest
1165 1169 mnfst = self.manifest
1166 1170 # We don't know which manifests are missing yet
1167 1171 msng_mnfst_set = {}
1168 1172 # Nor do we know which filenodes are missing.
1169 1173 msng_filenode_set = {}
1170 1174
1171 1175 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1172 1176 junk = None
1173 1177
1174 1178 # A changeset always belongs to itself, so the changenode lookup
1175 1179 # function for a changenode is identity.
1176 1180 def identity(x):
1177 1181 return x
1178 1182
1179 1183 # A function generating function. Sets up an environment for the
1180 1184 # inner function.
1181 1185 def cmp_by_rev_func(revlog):
1182 1186 # Compare two nodes by their revision number in the environment's
1183 1187 # revision history. Since the revision number both represents the
1184 1188 # most efficient order to read the nodes in, and represents a
1185 1189 # topological sorting of the nodes, this function is often useful.
1186 1190 def cmp_by_rev(a, b):
1187 1191 return cmp(revlog.rev(a), revlog.rev(b))
1188 1192 return cmp_by_rev
1189 1193
1190 1194 # If we determine that a particular file or manifest node must be a
1191 1195 # node that the recipient of the changegroup will already have, we can
1192 1196 # also assume the recipient will have all the parents. This function
1193 1197 # prunes them from the set of missing nodes.
1194 1198 def prune_parents(revlog, hasset, msngset):
1195 1199 haslst = hasset.keys()
1196 1200 haslst.sort(cmp_by_rev_func(revlog))
1197 1201 for node in haslst:
1198 1202 parentlst = [p for p in revlog.parents(node) if p != nullid]
1199 1203 while parentlst:
1200 1204 n = parentlst.pop()
1201 1205 if n not in hasset:
1202 1206 hasset[n] = 1
1203 1207 p = [p for p in revlog.parents(n) if p != nullid]
1204 1208 parentlst.extend(p)
1205 1209 for n in hasset:
1206 1210 msngset.pop(n, None)
1207 1211
1208 1212 # This is a function generating function used to set up an environment
1209 1213 # for the inner function to execute in.
1210 1214 def manifest_and_file_collector(changedfileset):
1211 1215 # This is an information gathering function that gathers
1212 1216 # information from each changeset node that goes out as part of
1213 1217 # the changegroup. The information gathered is a list of which
1214 1218 # manifest nodes are potentially required (the recipient may
1215 1219 # already have them) and total list of all files which were
1216 1220 # changed in any changeset in the changegroup.
1217 1221 #
1218 1222 # We also remember the first changenode we saw any manifest
1219 1223 # referenced by so we can later determine which changenode 'owns'
1220 1224 # the manifest.
1221 1225 def collect_manifests_and_files(clnode):
1222 1226 c = cl.read(clnode)
1223 1227 for f in c[3]:
1224 1228 # This is to make sure we only have one instance of each
1225 1229 # filename string for each filename.
1226 1230 changedfileset.setdefault(f, f)
1227 1231 msng_mnfst_set.setdefault(c[0], clnode)
1228 1232 return collect_manifests_and_files
1229 1233
1230 1234 # Figure out which manifest nodes (of the ones we think might be part
1231 1235 # of the changegroup) the recipient must know about and remove them
1232 1236 # from the changegroup.
1233 1237 def prune_manifests():
1234 1238 has_mnfst_set = {}
1235 1239 for n in msng_mnfst_set:
1236 1240 # If a 'missing' manifest thinks it belongs to a changenode
1237 1241 # the recipient is assumed to have, obviously the recipient
1238 1242 # must have that manifest.
1239 1243 linknode = cl.node(mnfst.linkrev(n))
1240 1244 if linknode in has_cl_set:
1241 1245 has_mnfst_set[n] = 1
1242 1246 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1243 1247
1244 1248 # Use the information collected in collect_manifests_and_files to say
1245 1249 # which changenode any manifestnode belongs to.
1246 1250 def lookup_manifest_link(mnfstnode):
1247 1251 return msng_mnfst_set[mnfstnode]
1248 1252
1249 1253 # A function generating function that sets up the initial environment
1250 1254 # for the inner function.
1251 1255 def filenode_collector(changedfiles):
1252 1256 next_rev = [0]
1253 1257 # This gathers information from each manifestnode included in the
1254 1258 # changegroup about which filenodes the manifest node references
1255 1259 # so we can include those in the changegroup too.
1256 1260 #
1257 1261 # It also remembers which changenode each filenode belongs to. It
1258 1262 # does this by assuming that a filenode belongs to the changenode
1259 1263 # the first manifest that references it belongs to.
1260 1264 def collect_msng_filenodes(mnfstnode):
1261 1265 r = mnfst.rev(mnfstnode)
1262 1266 if r == next_rev[0]:
1263 1267 # If the last rev we looked at was the one just previous,
1264 1268 # we only need to see a diff.
1265 1269 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1266 1270 # For each line in the delta
1267 1271 for dline in delta.splitlines():
1268 1272 # get the filename and filenode for that line
1269 1273 f, fnode = dline.split('\0')
1270 1274 fnode = bin(fnode[:40])
1271 1275 f = changedfiles.get(f, None)
1272 1276 # And if the file is in the list of files we care
1273 1277 # about.
1274 1278 if f is not None:
1275 1279 # Get the changenode this manifest belongs to
1276 1280 clnode = msng_mnfst_set[mnfstnode]
1277 1281 # Create the set of filenodes for the file if
1278 1282 # there isn't one already.
1279 1283 ndset = msng_filenode_set.setdefault(f, {})
1280 1284 # And set the filenode's changelog node to the
1281 1285 # manifest's if it hasn't been set already.
1282 1286 ndset.setdefault(fnode, clnode)
1283 1287 else:
1284 1288 # Otherwise we need a full manifest.
1285 1289 m = mnfst.read(mnfstnode)
1286 1290 # For every file we care about.
1287 1291 for f in changedfiles:
1288 1292 fnode = m.get(f, None)
1289 1293 # If it's in the manifest
1290 1294 if fnode is not None:
1291 1295 # See comments above.
1292 1296 clnode = msng_mnfst_set[mnfstnode]
1293 1297 ndset = msng_filenode_set.setdefault(f, {})
1294 1298 ndset.setdefault(fnode, clnode)
1295 1299 # Remember the revision we hope to see next.
1296 1300 next_rev[0] = r + 1
1297 1301 return collect_msng_filenodes
1298 1302
1299 1303 # We have a list of filenodes we think we need for a file; let's remove
1300 1304 # all those we know the recipient must have.
1301 1305 def prune_filenodes(f, filerevlog):
1302 1306 msngset = msng_filenode_set[f]
1303 1307 hasset = {}
1304 1308 # If a 'missing' filenode thinks it belongs to a changenode we
1305 1309 # assume the recipient must have, then the recipient must have
1306 1310 # that filenode.
1307 1311 for n in msngset:
1308 1312 clnode = cl.node(filerevlog.linkrev(n))
1309 1313 if clnode in has_cl_set:
1310 1314 hasset[n] = 1
1311 1315 prune_parents(filerevlog, hasset, msngset)
1312 1316
1313 1317 # A function generating function that sets up a context for the
1314 1318 # inner function.
1315 1319 def lookup_filenode_link_func(fname):
1316 1320 msngset = msng_filenode_set[fname]
1317 1321 # Lookup the changenode the filenode belongs to.
1318 1322 def lookup_filenode_link(fnode):
1319 1323 return msngset[fnode]
1320 1324 return lookup_filenode_link
1321 1325
1322 1326 # Now that we have all these utility functions to help out and
1323 1327 # logically divide up the task, generate the group.
1324 1328 def gengroup():
1325 1329 # The set of changed files starts empty.
1326 1330 changedfiles = {}
1327 1331 # Create a changenode group generator that will call our functions
1328 1332 # back to lookup the owning changenode and collect information.
1329 1333 group = cl.group(msng_cl_lst, identity,
1330 1334 manifest_and_file_collector(changedfiles))
1331 1335 for chnk in group:
1332 1336 yield chnk
1333 1337
1334 1338 # The list of manifests has been collected by the generator
1335 1339 # calling our functions back.
1336 1340 prune_manifests()
1337 1341 msng_mnfst_lst = msng_mnfst_set.keys()
1338 1342 # Sort the manifestnodes by revision number.
1339 1343 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1340 1344 # Create a generator for the manifestnodes that calls our lookup
1341 1345 # and data collection functions back.
1342 1346 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1343 1347 filenode_collector(changedfiles))
1344 1348 for chnk in group:
1345 1349 yield chnk
1346 1350
1347 1351 # These are no longer needed, dereference and toss the memory for
1348 1352 # them.
1349 1353 msng_mnfst_lst = None
1350 1354 msng_mnfst_set.clear()
1351 1355
1352 1356 changedfiles = changedfiles.keys()
1353 1357 changedfiles.sort()
1354 1358 # Go through all our files in order sorted by name.
1355 1359 for fname in changedfiles:
1356 1360 filerevlog = self.file(fname)
1357 1361 # Toss out the filenodes that the recipient isn't really
1358 1362 # missing.
1359 1363 if msng_filenode_set.has_key(fname):
1360 1364 prune_filenodes(fname, filerevlog)
1361 1365 msng_filenode_lst = msng_filenode_set[fname].keys()
1362 1366 else:
1363 1367 msng_filenode_lst = []
1364 1368 # If any filenodes are left, generate the group for them,
1365 1369 # otherwise don't bother.
1366 1370 if len(msng_filenode_lst) > 0:
1367 1371 yield changegroup.genchunk(fname)
1368 1372 # Sort the filenodes by their revision #
1369 1373 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1370 1374 # Create a group generator and only pass in a changenode
1371 1375 # lookup function as we need to collect no information
1372 1376 # from filenodes.
1373 1377 group = filerevlog.group(msng_filenode_lst,
1374 1378 lookup_filenode_link_func(fname))
1375 1379 for chnk in group:
1376 1380 yield chnk
1377 1381 if msng_filenode_set.has_key(fname):
1378 1382 # Don't need this anymore, toss it to free memory.
1379 1383 del msng_filenode_set[fname]
1380 1384 # Signal that no more groups are left.
1381 1385 yield changegroup.closechunk()
1382 1386
1383 1387 if msng_cl_lst:
1384 1388 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1385 1389
1386 1390 return util.chunkbuffer(gengroup())
1387 1391
1388 1392 def changegroup(self, basenodes, source):
1389 1393 """Generate a changegroup of all nodes that we have that a recipient
1390 1394 doesn't.
1391 1395
1392 1396 This is much easier than the previous function as we can assume that
1393 1397 the recipient has any changenode we aren't sending them."""
1394 1398
1395 1399 self.hook('preoutgoing', throw=True, source=source)
1396 1400
1397 1401 cl = self.changelog
1398 1402 nodes = cl.nodesbetween(basenodes, None)[0]
1399 1403 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1400 1404
1401 1405 def identity(x):
1402 1406 return x
1403 1407
1404 1408 def gennodelst(revlog):
1405 1409 for r in xrange(0, revlog.count()):
1406 1410 n = revlog.node(r)
1407 1411 if revlog.linkrev(n) in revset:
1408 1412 yield n
1409 1413
1410 1414 def changed_file_collector(changedfileset):
1411 1415 def collect_changed_files(clnode):
1412 1416 c = cl.read(clnode)
1413 1417 for fname in c[3]:
1414 1418 changedfileset[fname] = 1
1415 1419 return collect_changed_files
1416 1420
1417 1421 def lookuprevlink_func(revlog):
1418 1422 def lookuprevlink(n):
1419 1423 return cl.node(revlog.linkrev(n))
1420 1424 return lookuprevlink
1421 1425
1422 1426 def gengroup():
1423 1427 # construct a list of all changed files
1424 1428 changedfiles = {}
1425 1429
1426 1430 for chnk in cl.group(nodes, identity,
1427 1431 changed_file_collector(changedfiles)):
1428 1432 yield chnk
1429 1433 changedfiles = changedfiles.keys()
1430 1434 changedfiles.sort()
1431 1435
1432 1436 mnfst = self.manifest
1433 1437 nodeiter = gennodelst(mnfst)
1434 1438 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1435 1439 yield chnk
1436 1440
1437 1441 for fname in changedfiles:
1438 1442 filerevlog = self.file(fname)
1439 1443 nodeiter = gennodelst(filerevlog)
1440 1444 nodeiter = list(nodeiter)
1441 1445 if nodeiter:
1442 1446 yield changegroup.genchunk(fname)
1443 1447 lookup = lookuprevlink_func(filerevlog)
1444 1448 for chnk in filerevlog.group(nodeiter, lookup):
1445 1449 yield chnk
1446 1450
1447 1451 yield changegroup.closechunk()
1448 1452
1449 1453 if nodes:
1450 1454 self.hook('outgoing', node=hex(nodes[0]), source=source)
1451 1455
1452 1456 return util.chunkbuffer(gengroup())
1453 1457
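
Both generators above emit the chunked stream framed by changegroup.genchunk() and changegroup.closechunk(): each chunk is a 4-byte big-endian length that counts the prefix itself, followed by the payload, and a zero length closes a group. A sketch of that framing (assuming this is all genchunk/closechunk do):

    import struct

    def genchunk(data):
        # 4-byte big-endian length prefix, counting the prefix itself
        return struct.pack(">l", len(data) + 4) + data

    def closechunk():
        return struct.pack(">l", 0)     # empty chunk ends a group

    stream = genchunk("hello") + closechunk()
    l = struct.unpack(">l", stream[:4])[0]
    assert l == 9 and stream[4:l] == "hello"
    assert stream[l:] == closechunk()
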
1454 1458 def addchangegroup(self, source):
1455 1459 """add changegroup to repo.
1456 1460 returns number of heads modified or added + 1."""
1457 1461
1458 1462 def csmap(x):
1459 1463 self.ui.debug(_("add changeset %s\n") % short(x))
1460 1464 return cl.count()
1461 1465
1462 1466 def revmap(x):
1463 1467 return cl.rev(x)
1464 1468
1465 1469 if not source:
1466 1470 return 0
1467 1471
1468 1472 self.hook('prechangegroup', throw=True)
1469 1473
1470 1474 changesets = files = revisions = 0
1471 1475
1472 1476 tr = self.transaction()
1473 1477
1474 1478 # write changelog and manifest data to temp files so
1475 1479 # concurrent readers will not see inconsistent view
1476 1480 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1477 1481
1478 1482 oldheads = len(cl.heads())
1479 1483
1480 1484 # pull off the changeset group
1481 1485 self.ui.status(_("adding changesets\n"))
1482 1486 co = cl.tip()
1483 1487 chunkiter = changegroup.chunkiter(source)
1484 1488 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1485 1489 cnr, cor = map(cl.rev, (cn, co))
1486 1490 if cn == nullid:
1487 1491 cnr = cor
1488 1492 changesets = cnr - cor
1489 1493
1490 1494 mf = appendfile.appendmanifest(self.opener, self.manifest.version)
1491 1495
1492 1496 # pull off the manifest group
1493 1497 self.ui.status(_("adding manifests\n"))
1494 1498 mm = mf.tip()
1495 1499 chunkiter = changegroup.chunkiter(source)
1496 1500 mo = mf.addgroup(chunkiter, revmap, tr)
1497 1501
1498 1502 # process the files
1499 1503 self.ui.status(_("adding file changes\n"))
1500 1504 while 1:
1501 1505 f = changegroup.getchunk(source)
1502 1506 if not f:
1503 1507 break
1504 1508 self.ui.debug(_("adding %s revisions\n") % f)
1505 1509 fl = self.file(f)
1506 1510 o = fl.count()
1507 1511 chunkiter = changegroup.chunkiter(source)
1508 1512 n = fl.addgroup(chunkiter, revmap, tr)
1509 1513 revisions += fl.count() - o
1510 1514 files += 1
1511 1515
1512 1516 # write order here is important so concurrent readers will see
1513 1517 # consistent view of repo
1514 1518 mf.writedata()
1515 1519 cl.writedata()
1516 1520
1517 1521 # make changelog and manifest see real files again
1518 1522 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1519 1523 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1520 1524 self.changelog.checkinlinesize(tr)
1521 1525 self.manifest.checkinlinesize(tr)
1522 1526
1523 1527 newheads = len(self.changelog.heads())
1524 1528 heads = ""
1525 1529 if oldheads and newheads > oldheads:
1526 1530 heads = _(" (+%d heads)") % (newheads - oldheads)
1527 1531
1528 1532 self.ui.status(_("added %d changesets"
1529 1533 " with %d changes to %d files%s\n")
1530 1534 % (changesets, revisions, files, heads))
1531 1535
1532 1536 self.hook('pretxnchangegroup', throw=True,
1533 1537 node=hex(self.changelog.node(cor+1)))
1534 1538
1535 1539 tr.close()
1536 1540
1537 1541 if changesets > 0:
1538 1542 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1539 1543
1540 1544 for i in range(cor + 1, cnr + 1):
1541 1545 self.hook("incoming", node=hex(self.changelog.node(i)))
1542 1546
1543 1547 return newheads - oldheads + 1
1544 1548
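The write ordering in addchangegroup is the whole point of the appendfile dance: a reader resolves changelog entry -> manifest -> filelog, so new data has to become visible in the opposite direction. File revisions are appended directly, manifests and changesets accumulate in temp files, and mf.writedata() lands before cl.writedata() so that nothing the changelog names can be missing. A schematic sketch of the invariant, with hypothetical names:

    def publish(filelogs, manifestbuf, changelogbuf):
        # publish in reverse-reference order so a concurrent reader
        # never follows a pointer into data that is not there yet
        for fl in filelogs:
            fl.flush()           # 1. file revisions first
        manifestbuf.flush()      # 2. then the manifests naming them
        changelogbuf.flush()     # 3. changelog last: a new head appears
                                 #    only once its manifest exists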
1545 1549 def update(self, node, allow=False, force=False, choose=None,
1546 1550 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1547 1551 pl = self.dirstate.parents()
1548 1552 if not force and pl[1] != nullid:
1549 1553 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1550 1554 return 1
1551 1555
1552 1556 err = False
1553 1557
1554 1558 p1, p2 = pl[0], node
1555 1559 pa = self.changelog.ancestor(p1, p2)
1556 1560 m1n = self.changelog.read(p1)[0]
1557 1561 m2n = self.changelog.read(p2)[0]
1558 1562 man = self.manifest.ancestor(m1n, m2n)
1559 1563 m1 = self.manifest.read(m1n)
1560 1564 mf1 = self.manifest.readflags(m1n)
1561 1565 m2 = self.manifest.read(m2n).copy()
1562 1566 mf2 = self.manifest.readflags(m2n)
1563 1567 ma = self.manifest.read(man)
1564 1568 mfa = self.manifest.readflags(man)
1565 1569
1566 1570 modified, added, removed, deleted, unknown = self.changes()
1567 1571
1568 1572 # is this a jump, or a merge? i.e. is there a linear path
1569 1573 # from p1 to p2?
1570 1574 linear_path = (pa == p1 or pa == p2)
1571 1575
1572 1576 if allow and linear_path:
1573 1577 raise util.Abort(_("there is nothing to merge, "
1574 1578 "just use 'hg update'"))
1575 1579 if allow and not forcemerge:
1576 1580 if modified or added or removed:
1577 1581 raise util.Abort(_("outstanding uncommitted changes"))
1578 1582 if not forcemerge and not force:
1579 1583 for f in unknown:
1580 1584 if f in m2:
1581 1585 t1 = self.wread(f)
1582 1586 t2 = self.file(f).read(m2[f])
1583 1587 if cmp(t1, t2) != 0:
1584 1588 raise util.Abort(_("'%s' already exists in the working"
1585 1589 " dir and differs from remote") % f)
1586 1590
1587 1591 # resolve the manifest to determine which files
1588 1592 # we care about merging
1589 1593 self.ui.note(_("resolving manifests\n"))
1590 1594 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1591 1595 (force, allow, moddirstate, linear_path))
1592 1596 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1593 1597 (short(man), short(m1n), short(m2n)))
1594 1598
1595 1599 merge = {}
1596 1600 get = {}
1597 1601 remove = []
1598 1602
1599 1603 # construct a working dir manifest
1600 1604 mw = m1.copy()
1601 1605 mfw = mf1.copy()
1602 1606 umap = dict.fromkeys(unknown)
1603 1607
1604 1608 for f in added + modified + unknown:
1605 1609 mw[f] = ""
1606 1610 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1607 1611
1608 1612 if moddirstate and not wlock:
1609 1613 wlock = self.wlock()
1610 1614
1611 1615 for f in deleted + removed:
1612 1616 if f in mw:
1613 1617 del mw[f]
1614 1618
1615 1619 # If we're jumping between revisions (as opposed to merging),
1616 1620 # and if neither the working directory nor the target rev has
1617 1621 # the file, then we need to remove it from the dirstate, to
1618 1622 # prevent the dirstate from listing the file when it is no
1619 1623 # longer in the manifest.
1620 1624 if moddirstate and linear_path and f not in m2:
1621 1625 self.dirstate.forget((f,))
1622 1626
1623 1627 # Compare manifests
1624 1628 for f, n in mw.iteritems():
1625 1629 if choose and not choose(f):
1626 1630 continue
1627 1631 if f in m2:
1628 1632 s = 0
1629 1633
1630 1634 # is the wfile new since m1, and match m2?
1631 1635 if f not in m1:
1632 1636 t1 = self.wread(f)
1633 1637 t2 = self.file(f).read(m2[f])
1634 1638 if cmp(t1, t2) == 0:
1635 1639 n = m2[f]
1636 1640 del t1, t2
1637 1641
1638 1642 # are files different?
1639 1643 if n != m2[f]:
1640 1644 a = ma.get(f, nullid)
1641 1645 # are both different from the ancestor?
1642 1646 if n != a and m2[f] != a:
1643 1647 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1644 1648 # merge executable bits
1645 1649 # "if we changed or they changed, change in merge"
1646 1650 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1647 1651 mode = ((a^b) | (a^c)) ^ a
1648 1652 merge[f] = (m1.get(f, nullid), m2[f], mode)
1649 1653 s = 1
1650 1654 # are we clobbering?
1651 1655 # is remote's version newer?
1652 1656 # or are we going back in time?
1653 1657 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1654 1658 self.ui.debug(_(" remote %s is newer, get\n") % f)
1655 1659 get[f] = m2[f]
1656 1660 s = 1
1657 1661 elif f in umap or f in added:
1658 1662 # this unknown file is the same as the checkout
1659 1663 # we need to reset the dirstate if the file was added
1660 1664 get[f] = m2[f]
1661 1665
1662 1666 if not s and mfw[f] != mf2[f]:
1663 1667 if force:
1664 1668 self.ui.debug(_(" updating permissions for %s\n") % f)
1665 1669 util.set_exec(self.wjoin(f), mf2[f])
1666 1670 else:
1667 1671 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1668 1672 mode = ((a^b) | (a^c)) ^ a
1669 1673 if mode != b:
1670 1674 self.ui.debug(_(" updating permissions for %s\n")
1671 1675 % f)
1672 1676 util.set_exec(self.wjoin(f), mode)
1673 1677 del m2[f]
1674 1678 elif f in ma:
1675 1679 if n != ma[f]:
1676 1680 r = _("d")
1677 1681 if not force and (linear_path or allow):
1678 1682 r = self.ui.prompt(
1679 1683 (_(" local changed %s which remote deleted\n") % f) +
1680 1684 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1681 1685 if r == _("d"):
1682 1686 remove.append(f)
1683 1687 else:
1684 1688 self.ui.debug(_("other deleted %s\n") % f)
1685 1689 remove.append(f) # other deleted it
1686 1690 else:
1687 1691 # file is created on branch or in working directory
1688 1692 if force and f not in umap:
1689 1693 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1690 1694 remove.append(f)
1691 1695 elif n == m1.get(f, nullid): # same as parent
1692 1696 if p2 == pa: # going backwards?
1693 1697 self.ui.debug(_("remote deleted %s\n") % f)
1694 1698 remove.append(f)
1695 1699 else:
1696 1700 self.ui.debug(_("local modified %s, keeping\n") % f)
1697 1701 else:
1698 1702 self.ui.debug(_("working dir created %s, keeping\n") % f)
1699 1703
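The exec-bit expression mode = ((a^b) | (a^c)) ^ a, used twice in the loop above, implements the comment literally: start from the ancestor's bit a and flip it whenever our side b or their side c flipped it. A quick exhaustive self-check of that reading (helper name hypothetical):

    def mergeflag(a, b, c):
        # a: ancestor's exec bit, b: ours, c: theirs (each 0 or 1)
        return ((a ^ b) | (a ^ c)) ^ a

    for a in (0, 1):
        for b in (0, 1):
            for c in (0, 1):
                expected = b if b != a else c   # whoever changed it wins
                assert mergeflag(a, b, c) == expected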
1700 1704 for f, n in m2.iteritems():
1701 1705 if choose and not choose(f):
1702 1706 continue
1703 1707 if f[0] == "/":
1704 1708 continue
1705 1709 if f in ma and n != ma[f]:
1706 1710 r = _("k")
1707 1711 if not force and (linear_path or allow):
1708 1712 r = self.ui.prompt(
1709 1713 (_("remote changed %s which local deleted\n") % f) +
1710 1714 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1711 1715 if r == _("k"):
1712 1716 get[f] = n
1713 1717 elif f not in ma:
1714 1718 self.ui.debug(_("remote created %s\n") % f)
1715 1719 get[f] = n
1716 1720 else:
1717 1721 if force or p2 == pa: # going backwards?
1718 1722 self.ui.debug(_("local deleted %s, recreating\n") % f)
1719 1723 get[f] = n
1720 1724 else:
1721 1725 self.ui.debug(_("local deleted %s\n") % f)
1722 1726
1723 1727 del mw, m1, m2, ma
1724 1728
1725 1729 if force:
1726 1730 for f in merge:
1727 1731 get[f] = merge[f][1]
1728 1732 merge = {}
1729 1733
1730 1734 if linear_path or force:
1731 1735 # we don't need to do any magic, just jump to the new rev
1732 1736 branch_merge = False
1733 1737 p1, p2 = p2, nullid
1734 1738 else:
1735 1739 if not allow:
1736 1740 self.ui.status(_("this update spans a branch"
1737 1741 " affecting the following files:\n"))
1738 1742 fl = merge.keys() + get.keys()
1739 1743 fl.sort()
1740 1744 for f in fl:
1741 1745 cf = ""
1742 1746 if f in merge:
1743 1747 cf = _(" (resolve)")
1744 1748 self.ui.status(" %s%s\n" % (f, cf))
1745 1749 self.ui.warn(_("aborting update spanning branches!\n"))
1746 1750 self.ui.status(_("(use 'hg merge' to merge across branches"
1747 1751 " or 'hg update -C' to lose changes)\n"))
1748 1752 return 1
1749 1753 branch_merge = True
1750 1754
1751 1755 # get the files we don't need to change
1752 1756 files = get.keys()
1753 1757 files.sort()
1754 1758 for f in files:
1755 1759 if f[0] == "/":
1756 1760 continue
1757 1761 self.ui.note(_("getting %s\n") % f)
1758 1762 t = self.file(f).read(get[f])
1759 1763 self.wwrite(f, t)
1760 1764 util.set_exec(self.wjoin(f), mf2[f])
1761 1765 if moddirstate:
1762 1766 if branch_merge:
1763 1767 self.dirstate.update([f], 'n', st_mtime=-1)
1764 1768 else:
1765 1769 self.dirstate.update([f], 'n')
1766 1770
1767 1771 # merge the tricky bits
1768 1772 failedmerge = []
1769 1773 files = merge.keys()
1770 1774 files.sort()
1771 1775 xp1 = hex(p1)
1772 1776 xp2 = hex(p2)
1773 1777 for f in files:
1774 1778 self.ui.status(_("merging %s\n") % f)
1775 1779 my, other, flag = merge[f]
1776 1780 ret = self.merge3(f, my, other, xp1, xp2)
1777 1781 if ret:
1778 1782 err = True
1779 1783 failedmerge.append(f)
1780 1784 util.set_exec(self.wjoin(f), flag)
1781 1785 if moddirstate:
1782 1786 if branch_merge:
1783 1787 # We've done a branch merge, mark this file as merged
1784 1788 # so that we properly record the merge later
1785 1789 self.dirstate.update([f], 'm')
1786 1790 else:
1787 1791 # We've update-merged a locally modified file, so
1788 1792 # we set the dirstate to emulate a normal checkout
1789 1793 # of that file some time in the past. Thus our
1790 1794 # merge will appear as a normal local file
1791 1795 # modification.
1792 1796 f_len = len(self.file(f).read(other))
1793 1797 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1794 1798
1795 1799 remove.sort()
1796 1800 for f in remove:
1797 1801 self.ui.note(_("removing %s\n") % f)
1798 1802 util.audit_path(f)
1799 1803 try:
1800 1804 util.unlink(self.wjoin(f))
1801 1805 except OSError, inst:
1802 1806 if inst.errno != errno.ENOENT:
1803 1807 self.ui.warn(_("update failed to remove %s: %s!\n") %
1804 1808 (f, inst.strerror))
1805 1809 if moddirstate:
1806 1810 if branch_merge:
1807 1811 self.dirstate.update(remove, 'r')
1808 1812 else:
1809 1813 self.dirstate.forget(remove)
1810 1814
1811 1815 if moddirstate:
1812 1816 self.dirstate.setparents(p1, p2)
1813 1817
1814 1818 if show_stats:
1815 1819 stats = ((len(get), _("updated")),
1816 1820 (len(merge) - len(failedmerge), _("merged")),
1817 1821 (len(remove), _("removed")),
1818 1822 (len(failedmerge), _("unresolved")))
1819 1823 note = ", ".join([_("%d files %s") % s for s in stats])
1820 1824 self.ui.status("%s\n" % note)
1821 1825 if moddirstate:
1822 1826 if branch_merge:
1823 1827 if failedmerge:
1824 1828 self.ui.status(_("There are unresolved merges,"
1825 1829 " you can redo the full merge using:\n"
1826 1830 " hg update -C %s\n"
1827 1831 " hg merge %s\n"
1828 1832 % (self.changelog.rev(p1),
1829 1833 self.changelog.rev(p2))))
1830 1834 else:
1831 1835 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1832 1836 elif failedmerge:
1833 1837 self.ui.status(_("There are unresolved merges with"
1834 1838 " locally modified files.\n"))
1835 1839
1836 1840 return err
1837 1841
1838 1842 def merge3(self, fn, my, other, p1, p2):
1839 1843 """perform a 3-way merge in the working directory"""
1840 1844
1841 1845 def temp(prefix, node):
1842 1846 pre = "%s~%s." % (os.path.basename(fn), prefix)
1843 1847 (fd, name) = tempfile.mkstemp(prefix=pre)
1844 1848 f = os.fdopen(fd, "wb")
1845 1849 self.wwrite(fn, fl.read(node), f)
1846 1850 f.close()
1847 1851 return name
1848 1852
1849 1853 fl = self.file(fn)
1850 1854 base = fl.ancestor(my, other)
1851 1855 a = self.wjoin(fn)
1852 1856 b = temp("base", base)
1853 1857 c = temp("other", other)
1854 1858
1855 1859 self.ui.note(_("resolving %s\n") % fn)
1856 1860 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1857 1861 (fn, short(my), short(other), short(base)))
1858 1862
1859 1863 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1860 1864 or "hgmerge")
1861 1865 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1862 1866 environ={'HG_FILE': fn,
1863 1867 'HG_MY_NODE': p1,
1864 1868 'HG_OTHER_NODE': p2,
1865 1869 'HG_FILE_MY_NODE': hex(my),
1866 1870 'HG_FILE_OTHER_NODE': hex(other),
1867 1871 'HG_FILE_BASE_NODE': hex(base)})
1868 1872 if r:
1869 1873 self.ui.warn(_("merging %s failed!\n") % fn)
1870 1874
1871 1875 os.unlink(b)
1872 1876 os.unlink(c)
1873 1877 return r
1874 1878
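merge3 hands the external program three paths in the order local, base, other and expects it to leave the merge result in the local file, reporting conflicts through a non-zero exit status; the HG_* variables only provide context. A minimal stand-in tool in Python, assuming that calling convention (everything here is illustrative, not part of Mercurial):

    #!/usr/bin/env python
    # trivial merge tool: succeeds only when no real 3-way conflict exists
    import sys

    def merge(local, base, other):
        l, b, o = [open(p, "rb").read() for p in (local, base, other)]
        if l == b:                      # we did not touch it: take theirs
            f = open(local, "wb")
            f.write(o)
            f.close()
            return 0
        if o == b or l == o:            # they did not touch it, or we agree
            return 0                    # keep the local version
        return 1                        # genuine conflict: signal failure

    if __name__ == "__main__":
        sys.exit(merge(*sys.argv[1:4]))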
1875 1879 def verify(self):
1876 1880 filelinkrevs = {}
1877 1881 filenodes = {}
1878 1882 changesets = revisions = files = 0
1879 1883 errors = [0]
1880 1884 warnings = [0]
1881 1885 neededmanifests = {}
1882 1886
1883 1887 def err(msg):
1884 1888 self.ui.warn(msg + "\n")
1885 1889 errors[0] += 1
1886 1890
1887 1891 def warn(msg):
1888 1892 self.ui.warn(msg + "\n")
1889 1893 warnings[0] += 1
1890 1894
1891 1895 def checksize(obj, name):
1892 1896 d = obj.checksize()
1893 1897 if d[0]:
1894 1898 err(_("%s data length off by %d bytes") % (name, d[0]))
1895 1899 if d[1]:
1896 1900 err(_("%s index contains %d extra bytes") % (name, d[1]))
1897 1901
1898 1902 def checkversion(obj, name):
1899 1903 if obj.version != revlog.REVLOGV0:
1900 1904 if not revlogv1:
1901 1905 warn(_("warning: `%s' uses revlog format 1") % name)
1902 1906 elif revlogv1:
1903 1907 warn(_("warning: `%s' uses revlog format 0") % name)
1904 1908
1905 1909 revlogv1 = self.revlogversion != revlog.REVLOGV0
1906 1910 if self.ui.verbose or revlogv1 != self.revlogv1:
1907 1911 self.ui.status(_("repository uses revlog format %d\n") %
1908 1912 (revlogv1 and 1 or 0))
1909 1913
1910 1914 seen = {}
1911 1915 self.ui.status(_("checking changesets\n"))
1912 1916 checksize(self.changelog, "changelog")
1913 1917
1914 1918 for i in range(self.changelog.count()):
1915 1919 changesets += 1
1916 1920 n = self.changelog.node(i)
1917 1921 l = self.changelog.linkrev(n)
1918 1922 if l != i:
1919 1923 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1920 1924 if n in seen:
1921 1925 err(_("duplicate changeset at revision %d") % i)
1922 1926 seen[n] = 1
1923 1927
1924 1928 for p in self.changelog.parents(n):
1925 1929 if p not in self.changelog.nodemap:
1926 1930 err(_("changeset %s has unknown parent %s") %
1927 1931 (short(n), short(p)))
1928 1932 try:
1929 1933 changes = self.changelog.read(n)
1930 1934 except KeyboardInterrupt:
1931 1935 self.ui.warn(_("interrupted"))
1932 1936 raise
1933 1937 except Exception, inst:
1934 1938 err(_("unpacking changeset %s: %s") % (short(n), inst))
1935 1939 continue
1936 1940
1937 1941 neededmanifests[changes[0]] = n
1938 1942
1939 1943 for f in changes[3]:
1940 1944 filelinkrevs.setdefault(f, []).append(i)
1941 1945
1942 1946 seen = {}
1943 1947 self.ui.status(_("checking manifests\n"))
1944 1948 checkversion(self.manifest, "manifest")
1945 1949 checksize(self.manifest, "manifest")
1946 1950
1947 1951 for i in range(self.manifest.count()):
1948 1952 n = self.manifest.node(i)
1949 1953 l = self.manifest.linkrev(n)
1950 1954
1951 1955 if l < 0 or l >= self.changelog.count():
1952 1956 err(_("bad manifest link (%d) at revision %d") % (l, i))
1953 1957
1954 1958 if n in neededmanifests:
1955 1959 del neededmanifests[n]
1956 1960
1957 1961 if n in seen:
1958 1962 err(_("duplicate manifest at revision %d") % i)
1959 1963
1960 1964 seen[n] = 1
1961 1965
1962 1966 for p in self.manifest.parents(n):
1963 1967 if p not in self.manifest.nodemap:
1964 1968 err(_("manifest %s has unknown parent %s") %
1965 1969 (short(n), short(p)))
1966 1970
1967 1971 try:
1968 1972 delta = mdiff.patchtext(self.manifest.delta(n))
1969 1973 except KeyboardInterrupt:
1970 1974 self.ui.warn(_("interrupted"))
1971 1975 raise
1972 1976 except Exception, inst:
1973 1977 err(_("unpacking manifest %s: %s") % (short(n), inst))
1974 1978 continue
1975 1979
1976 1980 try:
1977 1981 ff = [ l.split('\0') for l in delta.splitlines() ]
1978 1982 for f, fn in ff:
1979 1983 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1980 1984 except (ValueError, TypeError), inst:
1981 1985 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1982 1986
1983 1987 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1984 1988
1985 1989 for m, c in neededmanifests.items():
1986 1990 err(_("Changeset %s refers to unknown manifest %s") %
1987 1991 (short(c), short(m)))
1988 1992 del neededmanifests
1989 1993
1990 1994 for f in filenodes:
1991 1995 if f not in filelinkrevs:
1992 1996 err(_("file %s in manifest but not in changesets") % f)
1993 1997
1994 1998 for f in filelinkrevs:
1995 1999 if f not in filenodes:
1996 2000 err(_("file %s in changeset but not in manifest") % f)
1997 2001
1998 2002 self.ui.status(_("checking files\n"))
1999 2003 ff = filenodes.keys()
2000 2004 ff.sort()
2001 2005 for f in ff:
2002 2006 if f == "/dev/null":
2003 2007 continue
2004 2008 files += 1
2005 2009 if not f:
2006 2010 err(_("file without name in manifest %s") % short(n))
2007 2011 continue
2008 2012 fl = self.file(f)
2009 2013 checkversion(fl, f)
2010 2014 checksize(fl, f)
2011 2015
2012 2016 nodes = {nullid: 1}
2013 2017 seen = {}
2014 2018 for i in range(fl.count()):
2015 2019 revisions += 1
2016 2020 n = fl.node(i)
2017 2021
2018 2022 if n in seen:
2019 2023 err(_("%s: duplicate revision %d") % (f, i))
2020 2024 if n not in filenodes[f]:
2021 2025 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2022 2026 else:
2023 2027 del filenodes[f][n]
2024 2028
2025 2029 flr = fl.linkrev(n)
2026 2030 if flr not in filelinkrevs.get(f, []):
2027 2031 err(_("%s:%s points to unexpected changeset %d")
2028 2032 % (f, short(n), flr))
2029 2033 else:
2030 2034 filelinkrevs[f].remove(flr)
2031 2035
2032 2036 # verify contents
2033 2037 try:
2034 2038 t = fl.read(n)
2035 2039 except KeyboardInterrupt:
2036 2040 self.ui.warn(_("interrupted"))
2037 2041 raise
2038 2042 except Exception, inst:
2039 2043 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2040 2044
2041 2045 # verify parents
2042 2046 (p1, p2) = fl.parents(n)
2043 2047 if p1 not in nodes:
2044 2048 err(_("file %s:%s unknown parent 1 %s") %
2045 2049 (f, short(n), short(p1)))
2046 2050 if p2 not in nodes:
2047 2051 err(_("file %s:%s unknown parent 2 %s") %
2048 2052 (f, short(n), short(p2)))
2049 2053 nodes[n] = 1
2050 2054
2051 2055 # cross-check
2052 2056 for node in filenodes[f]:
2053 2057 err(_("node %s in manifests not in %s") % (hex(node), f))
2054 2058
2055 2059 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2056 2060 (files, changesets, revisions))
2057 2061
2058 2062 if warnings[0]:
2059 2063 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2060 2064 if errors[0]:
2061 2065 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2062 2066 return 1
2063 2067
2064 2068 # used to avoid circular references so destructors work
2065 2069 def aftertrans(base):
2066 2070 p = base
2067 2071 def a():
2068 2072 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2069 2073 util.rename(os.path.join(p, "journal.dirstate"),
2070 2074 os.path.join(p, "undo.dirstate"))
2071 2075 return a
2072 2076
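aftertrans deliberately returns a plain closure over the path string rather than a bound method: the transaction's post-commit hook then holds no reference back to the repository object, so the repo -> transaction -> repo cycle that would keep destructors from running never forms. The same pattern in miniature (names hypothetical):

    import os

    def make_finisher(base):
        # capture only the immutable path, never the owning object
        def finish():
            os.rename(os.path.join(base, "journal"),
                      os.path.join(base, "undo"))
        return finish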
@@ -1,1241 +1,1246 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 17 demandload(globals(), "sha struct util zlib")
18 18
19 19 # revlog version numbers
20 20 REVLOGV0 = 0
21 21 REVLOGNG = 1
22 22
23 23 # revlog flags
24 24 REVLOGNGINLINEDATA = (1 << 16)
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
26
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
25 29
26 30 def flagstr(flag):
27 31 if flag == "inline":
28 32 return REVLOGNGINLINEDATA
29 33 raise RevlogError(_("unknown revlog flag %s") % flag)
30 34
31 35 def hash(text, p1, p2):
32 36 """generate a hash from the given text and its parent hashes
33 37
34 38 This hash combines both the current file contents and its history
35 39 in a manner that makes it easy to distinguish nodes with the same
36 40 content in the revision graph.
37 41 """
38 42 l = [p1, p2]
39 43 l.sort()
40 44 s = sha.new(l[0])
41 45 s.update(l[1])
42 46 s.update(text)
43 47 return s.digest()
44 48
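Because the parent hashes are sorted before hashing, the node id is symmetric in its parents: hash(text, p1, p2) == hash(text, p2, p1). A quick check of that property, substituting hashlib for the old sha module:

    import hashlib

    def nodehash(text, p1, p2):
        l = sorted([p1, p2])           # parent order must not matter
        s = hashlib.sha1(l[0])
        s.update(l[1])
        s.update(text)
        return s.digest()

    p1, p2 = b"a" * 20, b"b" * 20      # stand-in 20-byte parent ids
    assert nodehash(b"data", p1, p2) == nodehash(b"data", p2, p1)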
45 49 def compress(text):
46 50 """ generate a possibly-compressed representation of text """
47 51 if not text: return ("", text)
48 52 if len(text) < 44:
49 53 if text[0] == '\0': return ("", text)
50 54 return ('u', text)
51 55 bin = zlib.compress(text)
52 56 if len(bin) > len(text):
53 57 if text[0] == '\0': return ("", text)
54 58 return ('u', text)
55 59 return ("", bin)
56 60
57 61 def decompress(bin):
58 62 """ decompress the given input """
59 63 if not bin: return bin
60 64 t = bin[0]
61 65 if t == '\0': return bin
62 66 if t == 'x': return zlib.decompress(bin)
63 67 if t == 'u': return bin[1:]
64 68 raise RevlogError(_("unknown compression type %r") % t)
65 69
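The leading byte makes the stored blob self-describing: zlib output always starts with 'x', a literal 'u' marks text stored uncompressed because compression did not pay off, and text that itself begins with '\0' is stored raw, since decompress passes anything starting with NUL straight through. A round-trip check, assuming the two functions above are in scope:

    def roundtrip(text):
        head, body = compress(text)
        return decompress(head + body)

    for sample in ("", "\0starts with NUL", "short text",
                   "long repetitive text " * 100):
        assert roundtrip(sample) == sample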
66 70 indexformatv0 = ">4l20s20s20s"
67 71 v0shaoffset = 56
68 72 # index ng:
69 73 # 6 bytes offset
70 74 # 2 bytes flags
71 75 # 4 bytes compressed length
72 76 # 4 bytes uncompressed length
73 77 # 4 bytes: base rev
74 78 # 4 bytes link rev
75 79 # 4 bytes parent 1 rev
76 80 # 4 bytes parent 2 rev
77 81 # 32 bytes: nodeid
78 82 indexformatng = ">Qiiiiii20s12x"
79 83 ngshaoffset = 32
80 84 versionformat = ">i"
81 85
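Those format strings pin down the on-disk record sizes: a v0 entry is four longs plus three 20-byte hashes (76 bytes, nodeid at offset 56), while an ng entry packs the 6-byte offset and 2-byte flags into a single quad, followed by six ints, the 20-byte nodeid, and 12 bytes of padding (64 bytes, nodeid at offset 32). struct confirms the arithmetic:

    import struct

    assert struct.calcsize(">4l20s20s20s") == 76    # v0 entry
    assert struct.calcsize(">Qiiiiii20s12x") == 64  # revlogng entry
    assert 4 * 4 + 2 * 20 == 56                     # v0shaoffset
    assert 8 + 6 * 4 == 32                          # ngshaoffset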
82 86 class lazyparser(object):
83 87 """
84 88 this class avoids the need to parse the entirety of large indices
85 89 """
86 90 def __init__(self, dataf, size, indexformat, shaoffset):
87 91 self.dataf = dataf
88 92 self.format = indexformat
89 93 self.s = struct.calcsize(indexformat)
90 94 self.indexformat = indexformat
91 95 self.datasize = size
92 96 self.l = size/self.s
93 97 self.index = [None] * self.l
94 98 self.map = {nullid: -1}
95 99 self.allmap = 0
96 100 self.all = 0
97 101 self.mapfind_count = 0
98 102 self.shaoffset = shaoffset
99 103
100 104 def loadmap(self):
101 105 """
102 106 during a commit, we need to make sure the rev being added is
103 107 not a duplicate. This requires loading the entire index,
104 108 which is fairly slow. loadmap can load up just the node map,
105 109 which takes much less time.
106 110 """
107 111 if self.allmap: return
108 112 start = 0
109 113 end = self.datasize
110 114 self.allmap = 1
111 115 cur = 0
112 116 count = 0
113 117 blocksize = self.s * 256
114 118 self.dataf.seek(0)
115 119 while cur < end:
116 120 data = self.dataf.read(blocksize)
117 121 off = 0
118 122 for x in xrange(256):
119 123 n = data[off + self.shaoffset:off + self.shaoffset + 20]
120 124 self.map[n] = count
121 125 count += 1
122 126 if count >= self.l:
123 127 break
124 128 off += self.s
125 129 cur += blocksize
126 130
127 131 def loadblock(self, blockstart, blocksize, data=None):
128 132 if self.all: return
129 133 if data is None:
130 134 self.dataf.seek(blockstart)
131 135 data = self.dataf.read(blocksize)
132 136 lend = len(data) / self.s
133 137 i = blockstart / self.s
134 138 off = 0
135 139 for x in xrange(lend):
136 140 if self.index[i + x] == None:
137 141 b = data[off : off + self.s]
138 142 self.index[i + x] = b
139 143 n = b[self.shaoffset:self.shaoffset + 20]
140 144 self.map[n] = i + x
141 145 off += self.s
142 146
143 147 def findnode(self, node):
144 148 """search backwards through the index file for a specific node"""
145 149 if self.allmap: return None
146 150
147 151 # hg log will cause many many searches for the manifest
148 152 # nodes. After we get called a few times, just load the whole
149 153 # thing.
150 154 if self.mapfind_count > 8:
151 155 self.loadmap()
152 156 if node in self.map:
153 157 return node
154 158 return None
155 159 self.mapfind_count += 1
156 160 last = self.l - 1
157 161 while self.index[last] != None:
158 162 if last == 0:
159 163 self.all = 1
160 164 self.allmap = 1
161 165 return None
162 166 last -= 1
163 167 end = (last + 1) * self.s
164 168 blocksize = self.s * 256
165 169 while end >= 0:
166 170 start = max(end - blocksize, 0)
167 171 self.dataf.seek(start)
168 172 data = self.dataf.read(end - start)
169 173 findend = end - start
170 174 while True:
171 175 # we're searching backwards, so we have to make sure
172 176 # we don't find a changeset where this node is a parent
173 177 off = data.rfind(node, 0, findend)
174 178 findend = off
175 179 if off >= 0:
176 180 i = off / self.s
177 181 off = i * self.s
178 182 n = data[off + self.shaoffset:off + self.shaoffset + 20]
179 183 if n == node:
180 184 self.map[n] = i + start / self.s
181 185 return node
182 186 else:
183 187 break
184 188 end -= blocksize
185 189 return None
186 190
187 191 def loadindex(self, i=None, end=None):
188 192 if self.all: return
189 193 all = False
190 194 if i == None:
191 195 blockstart = 0
192 196 blocksize = (512 / self.s) * self.s
193 197 end = self.datasize
194 198 all = True
195 199 else:
196 200 if end:
197 201 blockstart = i * self.s
198 202 end = end * self.s
199 203 blocksize = end - blockstart
200 204 else:
201 205 blockstart = (i & ~(32)) * self.s
202 206 blocksize = self.s * 64
203 207 end = blockstart + blocksize
204 208 while blockstart < end:
205 209 self.loadblock(blockstart, blocksize)
206 210 blockstart += blocksize
207 211 if all: self.all = True
208 212
209 213 class lazyindex(object):
210 214 """a lazy version of the index array"""
211 215 def __init__(self, parser):
212 216 self.p = parser
213 217 def __len__(self):
214 218 return len(self.p.index)
215 219 def load(self, pos):
216 220 if pos < 0:
217 221 pos += len(self.p.index)
218 222 self.p.loadindex(pos)
219 223 return self.p.index[pos]
220 224 def __getitem__(self, pos):
221 225 ret = self.p.index[pos] or self.load(pos)
222 226 if isinstance(ret, str):
223 227 ret = struct.unpack(self.p.indexformat, ret)
224 228 return ret
225 229 def __setitem__(self, pos, item):
226 230 self.p.index[pos] = item
227 231 def __delitem__(self, pos):
228 232 del self.p.index[pos]
229 233 def append(self, e):
230 234 self.p.index.append(e)
231 235
232 236 class lazymap(object):
233 237 """a lazy version of the node map"""
234 238 def __init__(self, parser):
235 239 self.p = parser
236 240 def load(self, key):
237 241 n = self.p.findnode(key)
238 242 if n == None:
239 243 raise KeyError(key)
240 244 def __contains__(self, key):
241 245 if key in self.p.map:
242 246 return True
243 247 self.p.loadmap()
244 248 return key in self.p.map
245 249 def __iter__(self):
246 250 yield nullid
247 251 for i in xrange(self.p.l):
248 252 ret = self.p.index[i]
249 253 if not ret:
250 254 self.p.loadindex(i)
251 255 ret = self.p.index[i]
252 256 if isinstance(ret, str):
253 257 ret = struct.unpack(self.p.indexformat, ret)
254 258 yield ret[-1]
255 259 def __getitem__(self, key):
256 260 try:
257 261 return self.p.map[key]
258 262 except KeyError:
259 263 try:
260 264 self.load(key)
261 265 return self.p.map[key]
262 266 except KeyError:
263 267 raise KeyError("node " + hex(key))
264 268 def __setitem__(self, key, val):
265 269 self.p.map[key] = val
266 270 def __delitem__(self, key):
267 271 del self.p.map[key]
268 272
269 273 class RevlogError(Exception): pass
270 274
271 275 class revlog(object):
272 276 """
273 277 the underlying revision storage object
274 278
275 279 A revlog consists of two parts, an index and the revision data.
276 280
277 281 The index is a file with a fixed record size containing
278 282 information on each revision, includings its nodeid (hash), the
279 283 nodeids of its parents, the position and offset of its data within
280 284 the data file, and the revision it's based on. Finally, each entry
281 285 contains a linkrev entry that can serve as a pointer to external
282 286 data.
283 287
284 288 The revision data itself is a linear collection of data chunks.
285 289 Each chunk represents a revision and is usually represented as a
286 290 delta against the previous chunk. To bound lookup time, runs of
287 291 deltas are limited to about 2 times the length of the original
288 292 version data. This makes retrieval of a version proportional to
289 293 its size, or O(1) relative to the number of revisions.
290 294
291 295 Both pieces of the revlog are written to in an append-only
292 296 fashion, which means we never need to rewrite a file to insert or
293 297 remove data, and can use some simple techniques to avoid the need
294 298 for locking while reading.
295 299 """
296 def __init__(self, opener, indexfile, datafile, defversion=REVLOGV0):
300 def __init__(self, opener, indexfile, datafile,
301 defversion=REVLOG_DEFAULT_VERSION):
297 302 """
298 303 create a revlog object
299 304
300 305 opener is a function that abstracts the file opening operation
301 306 and can be used to implement COW semantics or the like.
302 307 """
303 308 self.indexfile = indexfile
304 309 self.datafile = datafile
305 310 self.opener = opener
306 311
307 312 self.indexstat = None
308 313 self.cache = None
309 314 self.chunkcache = None
310 315 self.defversion = defversion
311 316 self.load()
312 317
313 318 def load(self):
314 319 v = self.defversion
315 320 try:
316 321 f = self.opener(self.indexfile)
317 322 i = f.read(4)
318 323 f.seek(0)
319 324 except IOError, inst:
320 325 if inst.errno != errno.ENOENT:
321 326 raise
322 327 i = ""
323 328 else:
324 329 try:
325 330 st = util.fstat(f)
326 331 except AttributeError, inst:
327 332 st = None
328 333 else:
329 334 oldst = self.indexstat
330 335 if (oldst and st.st_dev == oldst.st_dev
331 336 and st.st_ino == oldst.st_ino
332 337 and st.st_mtime == oldst.st_mtime
333 338 and st.st_ctime == oldst.st_ctime):
334 339 return
335 340 self.indexstat = st
336 341 if len(i) > 0:
337 342 v = struct.unpack(versionformat, i)[0]
338 343 flags = v & ~0xFFFF
339 344 fmt = v & 0xFFFF
340 345 if fmt == REVLOGV0:
341 346 if flags:
342 347 raise RevlogError(_("index %s invalid flags %x for format v0") %
343 348 (self.indexfile, flags))
344 349 elif fmt == REVLOGNG:
345 350 if flags & ~REVLOGNGINLINEDATA:
346 351 raise RevlogError(_("index %s invalid flags %x for revlogng") %
347 352 (self.indexfile, flags))
348 353 else:
349 354 raise RevlogError(_("index %s invalid format %d") %
350 355 (self.indexfile, fmt))
351 356 self.version = v
352 357 if v == REVLOGV0:
353 358 self.indexformat = indexformatv0
354 359 shaoffset = v0shaoffset
355 360 else:
356 361 self.indexformat = indexformatng
357 362 shaoffset = ngshaoffset
358 363
359 364 if i:
360 365 if not self.inlinedata() and st and st.st_size > 10000:
361 366 # big index, let's parse it on demand
362 367 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
363 368 self.index = lazyindex(parser)
364 369 self.nodemap = lazymap(parser)
365 370 else:
366 371 i = f.read()
367 372 self.parseindex(i)
368 373 if self.inlinedata():
369 374 # we've already got the entire data file read in, save it
370 375 # in the chunk data
371 376 self.chunkcache = (0, i)
372 377 if self.version != REVLOGV0:
373 378 e = list(self.index[0])
374 379 type = self.ngtype(e[0])
375 380 e[0] = self.offset_type(0, type)
376 381 self.index[0] = e
377 382 else:
378 383 self.nodemap = { nullid: -1}
379 384 self.index = []
380 385
381 386
382 387 def parseindex(self, data):
383 388 s = struct.calcsize(self.indexformat)
384 389 l = len(data)
385 390 self.index = []
386 391 self.nodemap = {nullid: -1}
387 392 inline = self.inlinedata()
388 393 off = 0
389 394 n = 0
390 395 while off < l:
391 396 e = struct.unpack(self.indexformat, data[off:off + s])
392 397 self.index.append(e)
393 398 self.nodemap[e[-1]] = n
394 399 n += 1
395 400 off += s
396 401 if inline:
397 402 off += e[1]
398 403
399 404 def ngoffset(self, q):
400 405 if q & 0xFFFF:
401 406 raise RevlogError(_('%s: incompatible revision flag %x') %
402 407 (self.indexfile, q))
403 408 return long(q >> 16)
404 409
405 410 def ngtype(self, q):
406 411 return int(q & 0xFFFF)
407 412
408 413 def offset_type(self, offset, type):
409 414 return long(long(offset) << 16 | type)
410 415
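offset_type, ngoffset and ngtype above multiplex one 64-bit index field: the upper 48 bits carry the data-file offset and the low 16 bits the type/flag value, which is why ngoffset raises when the flag bits are set (only type 0 is understood so far). The arithmetic in isolation (Python 2, matching the code above):

    def offset_type(offset, type):
        return long(long(offset) << 16 | type)

    q = offset_type(12345, 0)
    assert q >> 16 == 12345         # ngoffset recovers the offset
    assert q & 0xFFFF == 0          # ngtype recovers the type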
411 416 def loadindex(self, start, end):
412 417 """load a block of indexes all at once from the lazy parser"""
413 418 if isinstance(self.index, lazyindex):
414 419 self.index.p.loadindex(start, end)
415 420
416 421 def loadindexmap(self):
417 422 """loads both the map and the index from the lazy parser"""
418 423 if isinstance(self.index, lazyindex):
419 424 p = self.index.p
420 425 p.loadindex()
421 426 self.nodemap = p.map
422 427
423 428 def loadmap(self):
424 429 """loads the map from the lazy parser"""
425 430 if isinstance(self.nodemap, lazymap):
426 431 self.nodemap.p.loadmap()
427 432 self.nodemap = self.nodemap.p.map
428 433
429 434 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
430 435 def tip(self): return self.node(len(self.index) - 1)
431 436 def count(self): return len(self.index)
432 437 def node(self, rev):
433 438 return (rev < 0) and nullid or self.index[rev][-1]
434 439 def rev(self, node):
435 440 try:
436 441 return self.nodemap[node]
437 442 except KeyError:
438 443 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
439 444 def linkrev(self, node): return self.index[self.rev(node)][-4]
440 445 def parents(self, node):
441 446 if node == nullid: return (nullid, nullid)
442 447 r = self.rev(node)
443 448 d = self.index[r][-3:-1]
444 449 if self.version == REVLOGV0:
445 450 return d
446 451 return [ self.node(x) for x in d ]
447 452 def start(self, rev):
448 453 if rev < 0:
449 454 return -1
450 455 if self.version != REVLOGV0:
451 456 return self.ngoffset(self.index[rev][0])
452 457 return self.index[rev][0]
453 458
454 459 def end(self, rev): return self.start(rev) + self.length(rev)
455 460
456 461 def size(self, rev):
457 462 """return the length of the uncompressed text for a given revision"""
458 463 l = -1
459 464 if self.version != REVLOGV0:
460 465 l = self.index[rev][2]
461 466 if l >= 0:
462 467 return l
463 468
464 469 t = self.revision(self.node(rev))
465 470 return len(t)
466 471
467 472 # alternate implementation, The advantage to this code is it
468 473 # will be faster for a single revision. But, the results are not
469 474 # cached, so finding the size of every revision will be slower.
470 475 """
471 476 if self.cache and self.cache[1] == rev:
472 477 return len(self.cache[2])
473 478
474 479 base = self.base(rev)
475 480 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
476 481 base = self.cache[1]
477 482 text = self.cache[2]
478 483 else:
479 484 text = self.revision(self.node(base))
480 485
481 486 l = len(text)
482 487 for x in xrange(base + 1, rev + 1):
483 488 l = mdiff.patchedsize(l, self.chunk(x))
484 489 return l
485 490 """
486 491
487 492 def length(self, rev):
488 493 if rev < 0:
489 494 return 0
490 495 else:
491 496 return self.index[rev][1]
492 497 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
493 498
494 499 def reachable(self, rev, stop=None):
495 500 reachable = {}
496 501 visit = [rev]
497 502 reachable[rev] = 1
498 503 if stop:
499 504 stopn = self.rev(stop)
500 505 else:
501 506 stopn = 0
502 507 while visit:
503 508 n = visit.pop(0)
504 509 if n == stop:
505 510 continue
506 511 if n == nullid:
507 512 continue
508 513 for p in self.parents(n):
509 514 if self.rev(p) < stopn:
510 515 continue
511 516 if p not in reachable:
512 517 reachable[p] = 1
513 518 visit.append(p)
514 519 return reachable
515 520
516 521 def nodesbetween(self, roots=None, heads=None):
517 522 """Return a tuple containing three elements. Elements 1 and 2 contain
518 523 final lists of bases and heads after all the unreachable ones have been
519 524 pruned. Element 0 contains a topologically sorted list of all
520 525
521 526 nodes that satisfy these constraints:
522 527 1. All nodes must be descended from a node in roots (the nodes on
523 528 roots are considered descended from themselves).
524 529 2. All nodes must also be ancestors of a node in heads (the nodes in
525 530 heads are considered to be their own ancestors).
526 531
527 532 If roots is unspecified, nullid is assumed as the only root.
528 533 If heads is unspecified, it is taken to be the output of the
529 534 heads method (i.e. a list of all nodes in the repository that
530 535 have no children)."""
531 536 nonodes = ([], [], [])
532 537 if roots is not None:
533 538 roots = list(roots)
534 539 if not roots:
535 540 return nonodes
536 541 lowestrev = min([self.rev(n) for n in roots])
537 542 else:
538 543 roots = [nullid] # Everybody's a descendent of nullid
539 544 lowestrev = -1
540 545 if (lowestrev == -1) and (heads is None):
541 546 # We want _all_ the nodes!
542 547 return ([self.node(r) for r in xrange(0, self.count())],
543 548 [nullid], list(self.heads()))
544 549 if heads is None:
545 550 # All nodes are ancestors, so the latest ancestor is the last
546 551 # node.
547 552 highestrev = self.count() - 1
548 553 # Set ancestors to None to signal that every node is an ancestor.
549 554 ancestors = None
550 555 # Set heads to an empty dictionary for later discovery of heads
551 556 heads = {}
552 557 else:
553 558 heads = list(heads)
554 559 if not heads:
555 560 return nonodes
556 561 ancestors = {}
557 562 # Start at the top and keep marking parents until we're done.
558 563 nodestotag = heads[:]
559 564 # Turn heads into a dictionary so we can remove 'fake' heads.
560 565 # Also, later we will be using it to filter out the heads we can't
561 566 # find from roots.
562 567 heads = dict.fromkeys(heads, 0)
563 568 # Remember where the top was so we can use it as a limit later.
564 569 highestrev = max([self.rev(n) for n in nodestotag])
565 570 while nodestotag:
566 571 # grab a node to tag
567 572 n = nodestotag.pop()
568 573 # Never tag nullid
569 574 if n == nullid:
570 575 continue
571 576 # A node's revision number represents its place in a
572 577 # topologically sorted list of nodes.
573 578 r = self.rev(n)
574 579 if r >= lowestrev:
575 580 if n not in ancestors:
576 581 # If we are possibly a descendent of one of the roots
577 582 # and we haven't already been marked as an ancestor
578 583 ancestors[n] = 1 # Mark as ancestor
579 584 # Add non-nullid parents to list of nodes to tag.
580 585 nodestotag.extend([p for p in self.parents(n) if
581 586 p != nullid])
582 587 elif n in heads: # We've seen it before, is it a fake head?
583 588 # So it is, real heads should not be the ancestors of
584 589 # any other heads.
585 590 heads.pop(n)
586 591 if not ancestors:
587 592 return nonodes
588 593 # Now that we have our set of ancestors, we want to remove any
589 594 # roots that are not ancestors.
590 595
591 596 # If one of the roots was nullid, everything is included anyway.
592 597 if lowestrev > -1:
593 598 # But, since we weren't, let's recompute the lowest rev to not
594 599 # include roots that aren't ancestors.
595 600
596 601 # Filter out roots that aren't ancestors of heads
597 602 roots = [n for n in roots if n in ancestors]
598 603 # Recompute the lowest revision
599 604 if roots:
600 605 lowestrev = min([self.rev(n) for n in roots])
601 606 else:
602 607 # No more roots? Return empty list
603 608 return nonodes
604 609 else:
605 610 # We are descending from nullid, and don't need to care about
606 611 # any other roots.
607 612 lowestrev = -1
608 613 roots = [nullid]
609 614 # Transform our roots list into a 'set' (i.e. a dictionary where the
610 615 # values don't matter).
611 616 descendents = dict.fromkeys(roots, 1)
612 617 # Also, keep the original roots so we can filter out roots that aren't
613 618 # 'real' roots (i.e. are descended from other roots).
614 619 roots = descendents.copy()
615 620 # Our topologically sorted list of output nodes.
616 621 orderedout = []
617 622 # Don't start at nullid since we don't want nullid in our output list,
618 623 # and if nullid shows up in descedents, empty parents will look like
619 624 # they're descendents.
620 625 for r in xrange(max(lowestrev, 0), highestrev + 1):
621 626 n = self.node(r)
622 627 isdescendent = False
623 628 if lowestrev == -1: # Everybody is a descendent of nullid
624 629 isdescendent = True
625 630 elif n in descendents:
626 631 # n is already a descendent
627 632 isdescendent = True
628 633 # This check only needs to be done here because all the roots
629 634 # will start being marked as descendents before the loop.
630 635 if n in roots:
631 636 # If n was a root, check if it's a 'real' root.
632 637 p = tuple(self.parents(n))
633 638 # If any of its parents are descendents, it's not a root.
634 639 if (p[0] in descendents) or (p[1] in descendents):
635 640 roots.pop(n)
636 641 else:
637 642 p = tuple(self.parents(n))
638 643 # A node is a descendent if either of its parents are
639 644 # descendents. (We seeded the descendents list with the roots
640 645 # up there, remember?)
641 646 if (p[0] in descendents) or (p[1] in descendents):
642 647 descendents[n] = 1
643 648 isdescendent = True
644 649 if isdescendent and ((ancestors is None) or (n in ancestors)):
645 650 # Only include nodes that are both descendents and ancestors.
646 651 orderedout.append(n)
647 652 if (ancestors is not None) and (n in heads):
648 653 # We're trying to figure out which heads are reachable
649 654 # from roots.
650 655 # Mark this head as having been reached
651 656 heads[n] = 1
652 657 elif ancestors is None:
653 658 # Otherwise, we're trying to discover the heads.
654 659 # Assume this is a head because if it isn't, the next step
655 660 # will eventually remove it.
656 661 heads[n] = 1
657 662 # But, obviously its parents aren't.
658 663 for p in self.parents(n):
659 664 heads.pop(p, None)
660 665 heads = [n for n in heads.iterkeys() if heads[n] != 0]
661 666 roots = roots.keys()
662 667 assert orderedout
663 668 assert roots
664 669 assert heads
665 670 return (orderedout, roots, heads)
666 671
667 672 def heads(self, start=None):
668 673 """return the list of all nodes that have no children
669 674
670 675 if start is specified, only heads that are descendants of
671 676 start will be returned
672 677
673 678 """
674 679 if start is None:
675 680 start = nullid
676 681 reachable = {start: 1}
677 682 heads = {start: 1}
678 683 startrev = self.rev(start)
679 684
680 685 for r in xrange(startrev + 1, self.count()):
681 686 n = self.node(r)
682 687 for pn in self.parents(n):
683 688 if pn in reachable:
684 689 reachable[n] = 1
685 690 heads[n] = 1
686 691 if pn in heads:
687 692 del heads[pn]
688 693 return heads.keys()
689 694
690 695 def children(self, node):
691 696 """find the children of a given node"""
692 697 c = []
693 698 p = self.rev(node)
694 699 for r in range(p + 1, self.count()):
695 700 n = self.node(r)
696 701 for pn in self.parents(n):
697 702 if pn == node:
698 703 c.append(n)
699 704 continue
700 705 elif pn == nullid:
701 706 continue
702 707 return c
703 708
704 709 def lookup(self, id):
705 710 """locate a node based on revision number or subset of hex nodeid"""
706 711 try:
707 712 rev = int(id)
708 713 if str(rev) != id: raise ValueError
709 714 if rev < 0: rev = self.count() + rev
710 715 if rev < 0 or rev >= self.count(): raise ValueError
711 716 return self.node(rev)
712 717 except (ValueError, OverflowError):
713 718 c = []
714 719 for n in self.nodemap:
715 720 if hex(n).startswith(id):
716 721 c.append(n)
717 722 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
718 723 if len(c) < 1: raise RevlogError(_("No match found"))
719 724 return c[0]
720 725
721 726 return None
722 727
723 728 def diff(self, a, b):
724 729 """return a delta between two revisions"""
725 730 return mdiff.textdiff(a, b)
726 731
727 732 def patches(self, t, pl):
728 733 """apply a list of patches to a string"""
729 734 return mdiff.patches(t, pl)
730 735
731 736 def chunk(self, rev, df=None, cachelen=4096):
732 737 start, length = self.start(rev), self.length(rev)
733 738 inline = self.inlinedata()
734 739 if inline:
735 740 start += (rev + 1) * struct.calcsize(self.indexformat)
736 741 end = start + length
737 742 def loadcache(df):
738 743 cache_length = max(cachelen, length) # 4k
739 744 if not df:
740 745 if inline:
741 746 df = self.opener(self.indexfile)
742 747 else:
743 748 df = self.opener(self.datafile)
744 749 df.seek(start)
745 750 self.chunkcache = (start, df.read(cache_length))
746 751
747 752 if not self.chunkcache:
748 753 loadcache(df)
749 754
750 755 cache_start = self.chunkcache[0]
751 756 cache_end = cache_start + len(self.chunkcache[1])
752 757 if start >= cache_start and end <= cache_end:
753 758 # it is cached
754 759 offset = start - cache_start
755 760 else:
756 761 loadcache(df)
757 762 offset = 0
758 763
759 764 #def checkchunk():
760 765 # df = self.opener(self.datafile)
761 766 # df.seek(start)
762 767 # return df.read(length)
763 768 #assert s == checkchunk()
764 769 return decompress(self.chunkcache[1][offset:offset + length])
765 770
766 771 def delta(self, node):
767 772 """return or calculate a delta between a node and its predecessor"""
768 773 r = self.rev(node)
769 774 return self.revdiff(r - 1, r)
770 775
771 776 def revdiff(self, rev1, rev2):
772 777 """return or calculate a delta between two revisions"""
773 778 b1 = self.base(rev1)
774 779 b2 = self.base(rev2)
775 780 if b1 == b2 and rev1 + 1 == rev2:
776 781 return self.chunk(rev2)
777 782 else:
778 783 return self.diff(self.revision(self.node(rev1)),
779 784 self.revision(self.node(rev2)))
780 785
781 786 def revision(self, node):
782 787 """return an uncompressed revision of a given"""
783 788 if node == nullid: return ""
784 789 if self.cache and self.cache[0] == node: return self.cache[2]
785 790
786 791 # look up what we need to read
787 792 text = None
788 793 rev = self.rev(node)
789 794 base = self.base(rev)
790 795
791 796 if self.inlinedata():
792 797 # we probably have the whole chunk cached
793 798 df = None
794 799 else:
795 800 df = self.opener(self.datafile)
796 801
797 802 # do we have useful data cached?
798 803 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
799 804 base = self.cache[1]
800 805 text = self.cache[2]
801 806 self.loadindex(base, rev + 1)
802 807 else:
803 808 self.loadindex(base, rev + 1)
804 809 text = self.chunk(base, df=df)
805 810
806 811 bins = []
807 812 for r in xrange(base + 1, rev + 1):
808 813 bins.append(self.chunk(r, df=df))
809 814
810 815 text = self.patches(text, bins)
811 816
812 817 p1, p2 = self.parents(node)
813 818 if node != hash(text, p1, p2):
814 819 raise RevlogError(_("integrity check failed on %s:%d")
815 820 % (self.datafile, rev))
816 821
817 822 self.cache = (node, rev, text)
818 823 return text
819 824
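revision above is the read side of the delta chain: fetch the last full snapshot at base(rev), possibly from cache, patch forward through every intermediate delta up to rev, then verify the result against the node hash. Schematically, assuming a patch(text, delta) primitive like mdiff.patches:

    def reconstruct(rl, rev, patch):
        # rl: a revlog-like object; patch: applies one delta to a text
        base = rl.base(rev)
        text = rl.chunk(base)          # the chain starts at a full version
        for r in xrange(base + 1, rev + 1):
            text = patch(text, rl.chunk(r))
        return text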
820 825 def checkinlinesize(self, tr, fp=None):
821 826 if not self.inlinedata():
822 827 return
823 828 if not fp:
824 829 fp = self.opener(self.indexfile, 'r')
825 830 fp.seek(0, 2)
826 831 size = fp.tell()
827 832 if size < 131072:
828 833 return
829 834 trinfo = tr.find(self.indexfile)
830 835 if trinfo == None:
831 836 raise RevlogError(_("%s not found in the transaction") %
832 837 self.indexfile)
833 838
834 839 trindex = trinfo[2]
835 840 dataoff = self.start(trindex)
836 841
837 842 tr.add(self.datafile, dataoff)
838 843 df = self.opener(self.datafile, 'w')
839 844 calc = struct.calcsize(self.indexformat)
840 845 for r in xrange(self.count()):
841 846 start = self.start(r) + (r + 1) * calc
842 847 length = self.length(r)
843 848 fp.seek(start)
844 849 d = fp.read(length)
845 850 df.write(d)
846 851 fp.close()
847 852 df.close()
848 853 fp = self.opener(self.indexfile, 'w', atomictemp=True)
849 854 self.version &= ~(REVLOGNGINLINEDATA)
850 855 if self.count():
851 856 x = self.index[0]
852 857 e = struct.pack(self.indexformat, *x)[4:]
853 858 l = struct.pack(versionformat, self.version)
854 859 fp.write(l)
855 860 fp.write(e)
856 861
857 862 for i in xrange(1, self.count()):
858 863 x = self.index[i]
859 864 e = struct.pack(self.indexformat, *x)
860 865 fp.write(e)
861 866
862 867 # if we don't call rename, the temp file will never replace the
863 868 # real index
864 869 fp.rename()
865 870
866 871 tr.replace(self.indexfile, trindex * calc)
867 872 self.chunkcache = None
868 873
869 874 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
870 875 """add a revision to the log
871 876
872 877 text - the revision data to add
873 878 transaction - the transaction object used for rollback
874 879 link - the linkrev data to add
875 880 p1, p2 - the parent nodeids of the revision
876 881 d - an optional precomputed delta
877 882 """
878 883 if text is None: text = ""
879 884 if p1 is None: p1 = self.tip()
880 885 if p2 is None: p2 = nullid
881 886
882 887 node = hash(text, p1, p2)
883 888
884 889 if node in self.nodemap:
885 890 return node
886 891
887 892 n = self.count()
888 893 t = n - 1
889 894
890 895 if n:
891 896 base = self.base(t)
892 897 start = self.start(base)
893 898 end = self.end(t)
894 899 if not d:
895 900 prev = self.revision(self.tip())
896 901 d = self.diff(prev, str(text))
897 902 data = compress(d)
898 903 l = len(data[1]) + len(data[0])
899 904 dist = end - start + l
900 905
901 906 # full versions are inserted when the needed deltas
902 907 # become comparable to the uncompressed text
903 908 if not n or dist > len(text) * 2:
904 909 data = compress(text)
905 910 l = len(data[1]) + len(data[0])
906 911 base = n
907 912 else:
908 913 base = self.base(t)
909 914
910 915 offset = 0
911 916 if t >= 0:
912 917 offset = self.end(t)
913 918
914 919 if self.version == REVLOGV0:
915 920 e = (offset, l, base, link, p1, p2, node)
916 921 else:
917 922 e = (self.offset_type(offset, 0), l, len(text),
918 923 base, link, self.rev(p1), self.rev(p2), node)
919 924
920 925 self.index.append(e)
921 926 self.nodemap[node] = n
922 927 entry = struct.pack(self.indexformat, *e)
923 928
924 929 if not self.inlinedata():
925 930 transaction.add(self.datafile, offset)
926 931 transaction.add(self.indexfile, n * len(entry))
927 932 f = self.opener(self.datafile, "a")
928 933 if data[0]:
929 934 f.write(data[0])
930 935 f.write(data[1])
931 936 f.close()
932 937 f = self.opener(self.indexfile, "a")
933 938 else:
934 939 f = self.opener(self.indexfile, "a+")
935 940 f.seek(0, 2)
936 941 transaction.add(self.indexfile, f.tell(), self.count() - 1)
937 942
938 943 if len(self.index) == 1 and self.version != REVLOGV0:
939 944 l = struct.pack(versionformat, self.version)
940 945 f.write(l)
941 946 entry = entry[4:]
942 947
943 948 f.write(entry)
944 949
945 950 if self.inlinedata():
946 951 f.write(data[0])
947 952 f.write(data[1])
948 953 self.checkinlinesize(transaction, f)
949 954
950 955 self.cache = (node, n, text)
951 956 return node
952 957
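The guard dist > len(text) * 2 is what keeps chains short: dist measures the data-file bytes from the chain's snapshot through the end of the new delta, so once replaying the chain would read more than twice the uncompressed text, a fresh full version is stored and becomes the new base. That is the bound behind the "O(1) relative to the number of revisions" claim in the class docstring. A worked instance with made-up sizes:

    textlen = 1000                  # uncompressed file size
    snapshot = 600                  # compressed full version at the base
    deltas = 0
    while snapshot + deltas * 50 <= 2 * textlen:
        deltas += 1                 # 50-byte deltas keep extending the chain
    # with these numbers the 29th delta tips the chain past 2x the text,
    # so addrevision would store a new snapshot instead
    assert snapshot + deltas * 50 > 2 * textlen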
953 958 def ancestor(self, a, b):
954 959 """calculate the least common ancestor of nodes a and b"""
955 960
956 961 # start with some short cuts for the linear cases
957 962 if a == b:
958 963 return a
959 964 ra = self.rev(a)
960 965 rb = self.rev(b)
961 966 if ra < rb:
962 967 last = b
963 968 first = a
964 969 else:
965 970 last = a
966 971 first = b
967 972
968 973 # reachable won't include stop in the list, so we have to use a parent
969 974 reachable = self.reachable(last, stop=self.parents(first)[0])
970 975 if first in reachable:
971 976 return first
972 977
973 978 # calculate the distance of every node from root
974 979 dist = {nullid: 0}
975 980 for i in xrange(self.count()):
976 981 n = self.node(i)
977 982 p1, p2 = self.parents(n)
978 983 dist[n] = max(dist[p1], dist[p2]) + 1
979 984
980 985 # traverse ancestors in order of decreasing distance from root
981 986 def ancestors(node):
982 987 # we store negative distances because heap returns smallest member
983 988 h = [(-dist[node], node)]
984 989 seen = {}
985 990 while h:
986 991 d, n = heapq.heappop(h)
987 992 if n not in seen:
988 993 seen[n] = 1
989 994 yield (-d, n)
990 995 for p in self.parents(n):
991 996 heapq.heappush(h, (-dist[p], p))
992 997
993 998 def generations(node):
994 999 sg, s = None, {}
995 1000 for g,n in ancestors(node):
996 1001 if g != sg:
997 1002 if sg:
998 1003 yield sg, s
999 1004 sg, s = g, {n:1}
1000 1005 else:
1001 1006 s[n] = 1
1002 1007 yield sg, s
1003 1008
1004 1009 x = generations(a)
1005 1010 y = generations(b)
1006 1011 gx = x.next()
1007 1012 gy = y.next()
1008 1013
1009 1014 # increment each ancestor list until it is closer to root than
1010 1015 # the other, or they match
1011 1016 while 1:
1012 1017 #print "ancestor gen %s %s" % (gx[0], gy[0])
1013 1018 if gx[0] == gy[0]:
1014 1019 # find the intersection
1015 1020 i = [ n for n in gx[1] if n in gy[1] ]
1016 1021 if i:
1017 1022 return i[0]
1018 1023 else:
1019 1024 #print "next"
1020 1025 gy = y.next()
1021 1026 gx = x.next()
1022 1027 elif gx[0] < gy[0]:
1023 1028 #print "next y"
1024 1029 gy = y.next()
1025 1030 else:
1026 1031 #print "next x"
1027 1032 gx = x.next()
1028 1033
1029 1034 def group(self, nodelist, lookup, infocollect=None):
1030 1035 """calculate a delta group
1031 1036
1032 1037 Given a list of changeset revs, return a set of deltas and
1033 1038 metadata corresponding to nodes. the first delta is
1034 1039 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1035 1040 have this parent as it has all history before these
1036 1041 changesets. parent is parent[0]
1037 1042 """
1038 1043 revs = [self.rev(n) for n in nodelist]
1039 1044
1040 1045 # if we don't have any revisions touched by these changesets, bail
1041 1046 if not revs:
1042 1047 yield changegroup.closechunk()
1043 1048 return
1044 1049
1045 1050 # add the parent of the first rev
1046 1051 p = self.parents(self.node(revs[0]))[0]
1047 1052 revs.insert(0, self.rev(p))
1048 1053
1049 1054 # build deltas
1050 1055 for d in xrange(0, len(revs) - 1):
1051 1056 a, b = revs[d], revs[d + 1]
1052 1057 nb = self.node(b)
1053 1058
1054 1059 if infocollect is not None:
1055 1060 infocollect(nb)
1056 1061
1057 1062 d = self.revdiff(a, b)
1058 1063 p = self.parents(nb)
1059 1064 meta = nb + p[0] + p[1] + lookup(nb)
1060 1065 yield changegroup.genchunk("%s%s" % (meta, d))
1061 1066
1062 1067 yield changegroup.closechunk()
1063 1068
1064 1069 def addgroup(self, revs, linkmapper, transaction, unique=0):
1065 1070 """
1066 1071 add a delta group
1067 1072
1068 1073 given a set of deltas, add them to the revision log. the
1069 1074 first delta is against its parent, which should be in our
1070 1075 log, the rest are against the previous delta.
1071 1076 """
1072 1077
1073 1078 #track the base of the current delta log
1074 1079 r = self.count()
1075 1080 t = r - 1
1076 1081 node = None
1077 1082
1078 1083 base = prev = -1
1079 1084 start = end = textlen = 0
1080 1085 if r:
1081 1086 end = self.end(t)
1082 1087
1083 1088 ifh = self.opener(self.indexfile, "a+")
1084 1089 ifh.seek(0, 2)
1085 1090 transaction.add(self.indexfile, ifh.tell(), self.count())
1086 1091 if self.inlinedata():
1087 1092 dfh = None
1088 1093 else:
1089 1094 transaction.add(self.datafile, end)
1090 1095 dfh = self.opener(self.datafile, "a")
1091 1096
1092 1097 # loop through our set of deltas
1093 1098 chain = None
1094 1099 for chunk in revs:
1095 1100 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1096 1101 link = linkmapper(cs)
1097 1102 if node in self.nodemap:
1098 1103 # this can happen if two branches make the same change
1099 1104 # if unique:
1100 1105 # raise RevlogError(_("already have %s") % hex(node[:4]))
1101 1106 chain = node
1102 1107 continue
1103 1108 delta = chunk[80:]
1104 1109
1105 1110 for p in (p1, p2):
1106 1111 if not p in self.nodemap:
1107 1112 raise RevlogError(_("unknown parent %s") % short(p))
1108 1113
1109 1114 if not chain:
1110 1115 # retrieve the parent revision of the delta chain
1111 1116 chain = p1
1112 1117 if not chain in self.nodemap:
1113 1118 raise RevlogError(_("unknown base %s") % short(chain))
1114 1119
1115 1120 # full versions are inserted when the needed deltas become
1116 1121 # comparable to the uncompressed text or when the previous
1117 1122 # version is not the one we have a delta against. We use
1118 1123 # the size of the previous full rev as a proxy for the
1119 1124 # current size.
1120 1125
1121 1126 if chain == prev:
1122 1127 tempd = compress(delta)
1123 1128 cdelta = tempd[0] + tempd[1]
1124 1129 textlen = mdiff.patchedsize(textlen, delta)
1125 1130
1126 1131 if chain != prev or (end - start + len(cdelta)) > textlen * 2:
1127 1132 # flush our writes here so we can read it in revision
1128 1133 if dfh:
1129 1134 dfh.flush()
1130 1135 ifh.flush()
1131 1136 text = self.revision(chain)
1132 1137 text = self.patches(text, [delta])
1133 1138 chk = self.addrevision(text, transaction, link, p1, p2)
1134 1139 if chk != node:
1135 1140 raise RevlogError(_("consistency error adding group"))
1136 1141 textlen = len(text)
1137 1142 else:
1138 1143 if self.version == REVLOGV0:
1139 1144 e = (end, len(cdelta), base, link, p1, p2, node)
1140 1145 else:
1141 1146 e = (self.offset_type(end, 0), len(cdelta), textlen, base,
1142 1147 link, self.rev(p1), self.rev(p2), node)
1143 1148 self.index.append(e)
1144 1149 self.nodemap[node] = r
1145 1150 if self.inlinedata():
1146 1151 ifh.write(struct.pack(self.indexformat, *e))
1147 1152 ifh.write(cdelta)
1148 1153 self.checkinlinesize(transaction, ifh)
1149 1154 if not self.inlinedata():
1150 1155 dfh = self.opener(self.datafile, "a")
1151 1156 ifh = self.opener(self.indexfile, "a")
1152 1157 else:
1153 1158 if not dfh:
1154 1159 # addrevision switched from inline to conventional
1155 1160 # reopen the index
1156 1161 dfh = self.opener(self.datafile, "a")
1157 1162 ifh = self.opener(self.indexfile, "a")
1158 1163 dfh.write(cdelta)
1159 1164 ifh.write(struct.pack(self.indexformat, *e))
1160 1165
1161 1166 t, r, chain, prev = r, r + 1, node, node
1162 1167 base = self.base(t)
1163 1168 start = self.start(base)
1164 1169 end = self.end(t)
1165 1170
1166 1171 if node is None:
1167 1172 raise RevlogError(_("group to be added is empty"))
1168 1173 return node
1169 1174
1170 1175 def strip(self, rev, minlink):
1171 1176 if self.count() == 0 or rev >= self.count():
1172 1177 return
1173 1178
1174 1179 if isinstance(self.index, lazyindex):
1175 1180 self.loadindexmap()
1176 1181
1177 1182 # When stripping away a revision, we need to make sure it
1178 1183 # does not actually belong to an older changeset.
1179 1184 # The minlink parameter defines the oldest revision
1180 1185 # we're allowed to strip away.
1181 1186 while minlink > self.index[rev][-4]:
1182 1187 rev += 1
1183 1188 if rev >= self.count():
1184 1189 return
1185 1190
1186 1191 # first truncate the files on disk
1187 1192 end = self.start(rev)
1188 1193 if not self.inlinedata():
1189 1194 df = self.opener(self.datafile, "a")
1190 1195 df.truncate(end)
1191 1196 end = rev * struct.calcsize(self.indexformat)
1192 1197 else:
1193 1198 end += rev * struct.calcsize(self.indexformat)
1194 1199
1195 1200 indexf = self.opener(self.indexfile, "a")
1196 1201 indexf.truncate(end)
1197 1202
1198 1203 # then reset internal state in memory to forget those revisions
1199 1204 self.cache = None
1200 1205 self.chunkcache = None
1201 1206 for x in xrange(rev, self.count()):
1202 1207 del self.nodemap[self.node(x)]
1203 1208
1204 1209 del self.index[rev:]
1205 1210
1206 1211 def checksize(self):
1207 1212 expected = 0
1208 1213 if self.count():
1209 1214 expected = self.end(self.count() - 1)
1210 1215
1211 1216 try:
1212 1217 f = self.opener(self.datafile)
1213 1218 f.seek(0, 2)
1214 1219 actual = f.tell()
1215 1220 dd = actual - expected
1216 1221 except IOError, inst:
1217 1222 if inst.errno != errno.ENOENT:
1218 1223 raise
1219 1224 dd = 0
1220 1225
1221 1226 try:
1222 1227 f = self.opener(self.indexfile)
1223 1228 f.seek(0, 2)
1224 1229 actual = f.tell()
1225 1230 s = struct.calcsize(self.indexformat)
1226 1231 i = actual / s
1227 1232 di = actual - (i * s)
1228 1233 if self.inlinedata():
1229 1234 databytes = 0
1230 1235 for r in xrange(self.count()):
1231 1236 databytes += self.length(r)
1232 1237 dd = 0
1233 1238 di = actual - self.count() * s - databytes
1234 1239 except IOError, inst:
1235 1240 if inst.errno != errno.ENOENT:
1236 1241 raise
1237 1242 di = 0
1238 1243
1239 1244 return (dd, di)
1240 1245
1241 1246