##// END OF EJS Templates
verify: notice extra data in indices
Matt Mackall -
r1667:daff3ef0 default
parent child Browse files
Show More
@@ -1,1826 +1,1829 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14
14
15 class localrepository(object):
15 class localrepository(object):
    def __init__(self, ui, path=None, create=0):
        """Open (or create) a repository rooted at *path*.

        If path is None, walk upward from the current directory until
        a ".hg" directory is found.  Raises repo.RepoError when no
        repository exists and create is false.
        """
        if not path:
            # search upward from the cwd for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-built caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None
        # lazily-built [encode]/[decode] filter lists; see wread()/wwrite()
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass
    def hook(self, name, **args):
        """Run all shell hooks configured for *name*.

        Keyword arguments are exported (upper-cased) into the hook's
        environment.  Returns True only if every matching hook exited
        with status 0.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            old = {}
            # export args into the environment, remembering prior values
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(cmd)
            os.chdir(olddir)

            # restore the environment exactly as it was
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
            return True

        r = True
        # hooks are configured as [hooks] name.suffix = command;
        # everything before the first "." selects the hook type
        for hname, cmd in self.ui.configitems("hooks"):
            s = hname.split(".")
            if s[0] == name and cmd:
                r = runhook(hname, cmd) and r
        return r
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # note: addtag takes self explicitly because it is a plain
            # nested function, not a method
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # unparsable node id: keep the tag but point nowhere
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined, last so nothing can shadow it
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
123 def tagslist(self):
123 def tagslist(self):
124 '''return a list of tags ordered by revision'''
124 '''return a list of tags ordered by revision'''
125 l = []
125 l = []
126 for t, n in self.tags().items():
126 for t, n in self.tags().items():
127 try:
127 try:
128 r = self.changelog.rev(n)
128 r = self.changelog.rev(n)
129 except:
129 except:
130 r = -2 # sort to the beginning of the list if unknown
130 r = -2 # sort to the beginning of the list if unknown
131 l.append((r, t, n))
131 l.append((r, t, n))
132 l.sort()
132 l.sort()
133 return [(t, n) for r, t, n in l]
133 return [(t, n) for r, t, n in l]
134
134
135 def nodetags(self, node):
135 def nodetags(self, node):
136 '''return the tags associated with a node'''
136 '''return the tags associated with a node'''
137 if not self.nodetagscache:
137 if not self.nodetagscache:
138 self.nodetagscache = {}
138 self.nodetagscache = {}
139 for t, n in self.tags().items():
139 for t, n in self.tags().items():
140 self.nodetagscache.setdefault(n, []).append(t)
140 self.nodetagscache.setdefault(n, []).append(t)
141 return self.nodetagscache.get(node, [])
141 return self.nodetagscache.get(node, [])
142
142
143 def lookup(self, key):
143 def lookup(self, key):
144 try:
144 try:
145 return self.tags()[key]
145 return self.tags()[key]
146 except KeyError:
146 except KeyError:
147 try:
147 try:
148 return self.changelog.lookup(key)
148 return self.changelog.lookup(key)
149 except:
149 except:
150 raise repo.RepoError(_("unknown revision '%s'") % key)
150 raise repo.RepoError(_("unknown revision '%s'") % key)
151
151
152 def dev(self):
152 def dev(self):
153 return os.stat(self.path).st_dev
153 return os.stat(self.path).st_dev
154
154
    def local(self):
        # this class is always a local (filesystem) repository
        return True
158 def join(self, f):
158 def join(self, f):
159 return os.path.join(self.path, f)
159 return os.path.join(self.path, f)
160
160
161 def wjoin(self, f):
161 def wjoin(self, f):
162 return os.path.join(self.root, f)
162 return os.path.join(self.root, f)
163
163
164 def file(self, f):
164 def file(self, f):
165 if f[0] == '/':
165 if f[0] == '/':
166 f = f[1:]
166 f = f[1:]
167 return filelog.filelog(self.opener, f)
167 return filelog.filelog(self.opener, f)
168
168
    def getcwd(self):
        """current directory as computed by the dirstate
        (see dirstate.getcwd)"""
        return self.dirstate.getcwd()
    def wfile(self, f, mode='r'):
        """open file f from the working directory; no encode/decode
        filtering is applied (compare wread/wwrite)"""
        return self.wopener(f, mode)
    def wread(self, filename):
        """read filename from the working directory, applying any
        configured [encode] filters before returning the data"""
        if self.encodepats == None:
            # build the (matchfn, command) filter list once, lazily
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
    def wwrite(self, filename, data, fd=None):
        """write data to filename in the working directory, applying
        any configured [decode] filters first; writes to the already
        open *fd* instead when one is given"""
        if self.decodepats == None:
            # build the (matchfn, command) filter list once, lazily
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
    def transaction(self):
        """start a new transaction; returns a transaction object whose
        journal is renamed to 'undo' when it is closed successfully"""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on close, keep the journal around as undo information
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
    def recover(self):
        """roll back an interrupted transaction, if any.

        Returns True when a journal was rolled back, False otherwise.
        """
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # discard in-memory state that may reference rolled-back data
            self.manifest = manifest.manifest(self.opener)
            self.changelog = changelog.changelog(self.opener)
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    def undo(self):
        """roll back the last completed transaction (the 'undo' file),
        restoring the dirstate that was saved alongside it"""
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
    def lock(self, wait=1):
        """acquire the repository lock; if wait is true, block until
        the current holder releases it, otherwise let lock.LockHeld
        propagate"""
        try:
            # try without waiting first so we can print a message
            # before blocking
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
    def wlock(self, wait=1):
        """acquire the working-directory lock; the dirstate is written
        back when the lock is released"""
        try:
            # try without waiting first so we can print a message
            # before blocking
            wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
            # another process held the lock: re-read the dirstate it
            # may have written
            self.dirstate.read()
        return wlock
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Low-level commit with explicitly supplied parents.

        Unlike commit(), the parents need not match the dirstate; the
        dirstate is only updated when p1 is the current first parent.
        NOTE(review): files that cannot be read from the working dir
        are dropped from the new manifest (treated as deleted).
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing on top of it
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable: remove the file from the new manifest
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False):
        """Create a new changeset from the working directory.

        files: explicit list to commit, or None to commit everything
        reported changed by self.changes().  Returns the new changeset
        node, or None when nothing is committed (no changes, precommit
        hook veto, or an empty commit message from the editor).
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly named files by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 != nullid) may legitimately change no files
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        if not self.hook("precommit"):
            return None

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in the file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1) and fp2 == nullid:
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message given: open the user's editor with a template
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # an empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        tr.close()

        # move the dirstate onto the new changeset
        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
    def walk(self, node=None, files=[], match=util.always):
        """Yield (src, filename) pairs for files matching *match*.

        With a node, walk the manifest of that revision (src 'm') and
        warn about requested files missing from it; without one, walk
        the dirstate/working directory.
        """
        if node:
            # track which requested files we actually encountered
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # anything left was asked for but absent in that revision
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
    def changes(self, node1=None, node2=None, files=[], match=util.always):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of file names.
        """

        def fcmp(fn, mf):
            # compare working-dir contents against the manifested revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of *node* restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockHeld:
                # can't update the dirstate; proceed read-only
                wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file proved clean: record that in the dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            mf1 = mfmatches(node1)

            # "" entries in mf2 stand for working-dir files of unknown
            # revision and force a content compare
            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
553 def add(self, list):
553 def add(self, list):
554 wlock = self.wlock()
554 wlock = self.wlock()
555 for f in list:
555 for f in list:
556 p = self.wjoin(f)
556 p = self.wjoin(f)
557 if not os.path.exists(p):
557 if not os.path.exists(p):
558 self.ui.warn(_("%s does not exist!\n") % f)
558 self.ui.warn(_("%s does not exist!\n") % f)
559 elif not os.path.isfile(p):
559 elif not os.path.isfile(p):
560 self.ui.warn(_("%s not added: only files supported currently\n")
560 self.ui.warn(_("%s not added: only files supported currently\n")
561 % f)
561 % f)
562 elif self.dirstate.state(f) in 'an':
562 elif self.dirstate.state(f) in 'an':
563 self.ui.warn(_("%s already tracked!\n") % f)
563 self.ui.warn(_("%s already tracked!\n") % f)
564 else:
564 else:
565 self.dirstate.update([f], "a")
565 self.dirstate.update([f], "a")
566
566
567 def forget(self, list):
567 def forget(self, list):
568 wlock = self.wlock()
568 wlock = self.wlock()
569 for f in list:
569 for f in list:
570 if self.dirstate.state(f) not in 'ai':
570 if self.dirstate.state(f) not in 'ai':
571 self.ui.warn(_("%s not added!\n") % f)
571 self.ui.warn(_("%s not added!\n") % f)
572 else:
572 else:
573 self.dirstate.forget([f])
573 self.dirstate.forget([f])
574
574
    def remove(self, list, unlink=False):
        """schedule files for removal; with unlink, delete them from
        the working directory first"""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark removed a file still present on disk
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just forget it
                self.ui.warn(_("%s never committed!\n") % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
596 def undelete(self, list):
596 def undelete(self, list):
597 p = self.dirstate.parents()[0]
597 p = self.dirstate.parents()[0]
598 mn = self.changelog.read(p)[0]
598 mn = self.changelog.read(p)[0]
599 mf = self.manifest.readflags(mn)
599 mf = self.manifest.readflags(mn)
600 m = self.manifest.read(mn)
600 m = self.manifest.read(mn)
601 wlock = self.wlock()
601 wlock = self.wlock()
602 for f in list:
602 for f in list:
603 if self.dirstate.state(f) not in "r":
603 if self.dirstate.state(f) not in "r":
604 self.ui.warn("%s not removed!\n" % f)
604 self.ui.warn("%s not removed!\n" % f)
605 else:
605 else:
606 t = self.file(f).read(m[f])
606 t = self.file(f).read(m[f])
607 self.wwrite(f, t)
607 self.wwrite(f, t)
608 util.set_exec(self.wjoin(f), mf[f])
608 util.set_exec(self.wjoin(f), mf[f])
609 self.dirstate.update([f], "n")
609 self.dirstate.update([f], "n")
610
610
611 def copy(self, source, dest):
611 def copy(self, source, dest):
612 p = self.wjoin(dest)
612 p = self.wjoin(dest)
613 if not os.path.exists(p):
613 if not os.path.exists(p):
614 self.ui.warn(_("%s does not exist!\n") % dest)
614 self.ui.warn(_("%s does not exist!\n") % dest)
615 elif not os.path.isfile(p):
615 elif not os.path.isfile(p):
616 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
616 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
617 else:
617 else:
618 wlock = self.wlock()
618 wlock = self.wlock()
619 if self.dirstate.state(dest) == '?':
619 if self.dirstate.state(dest) == '?':
620 self.dirstate.update([dest], "a")
620 self.dirstate.update([dest], "a")
621 self.dirstate.copy(source, dest)
621 self.dirstate.copy(source, dest)
622
622
623 def heads(self, start=None):
623 def heads(self, start=None):
624 heads = self.changelog.heads(start)
624 heads = self.changelog.heads(start)
625 # sort the output in rev descending order
625 # sort the output in rev descending order
626 heads = [(-self.changelog.rev(h), h) for h in heads]
626 heads = [(-self.changelog.rev(h), h) for h in heads]
627 heads.sort()
627 heads.sort()
628 return [n for (r, n) in heads]
628 return [n for (r, n) in heads]
629
629
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \        /
    #                        aa ----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it.

        See the block comment above for the full semantics.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}           # node -> {tagged node visible from it: 1}
        merges = []             # deferred (second-parent, found-so-far) work
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal that forked at a merge's second parent
                n, found = merges.pop()
                visit = [n]
            else:
                # start a fresh traversal from the next head;
                # seen is reset only here, so merge continuations of the
                # same head share it
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' is not a branch name
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break       # one non-tip tag is enough to record n
                    if n not in found:
                        found.append(n)
                    # limit search depth when a specific branch was requested
                    if branch in tags:
                        continue
                seen[n] = 1
                # defer the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}           # node -> dict of nodes reachable from it
        for h in heads:
            def visible(node):
                # transitive closure over the branches graph, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag on this head
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
735
735
736 def branches(self, nodes):
736 def branches(self, nodes):
737 if not nodes:
737 if not nodes:
738 nodes = [self.changelog.tip()]
738 nodes = [self.changelog.tip()]
739 b = []
739 b = []
740 for n in nodes:
740 for n in nodes:
741 t = n
741 t = n
742 while n:
742 while n:
743 p = self.changelog.parents(n)
743 p = self.changelog.parents(n)
744 if p[1] != nullid or p[0] == nullid:
744 if p[1] != nullid or p[0] == nullid:
745 b.append((t, n, p[0], p[1]))
745 b.append((t, n, p[0], p[1]))
746 break
746 break
747 n = p[0]
747 n = p[0]
748 return b
748 return b
749
749
750 def between(self, pairs):
750 def between(self, pairs):
751 r = []
751 r = []
752
752
753 for top, bottom in pairs:
753 for top, bottom in pairs:
754 n, l, i = top, [], 0
754 n, l, i = top, [], 0
755 f = 1
755 f = 1
756
756
757 while n != bottom:
757 while n != bottom:
758 p = self.changelog.parents(n)[0]
758 p = self.changelog.parents(n)[0]
759 if i == f:
759 if i == f:
760 l.append(n)
760 l.append(n)
761 f = f * 2
761 f = f * 2
762 n = p
762 n = p
763 i += 1
763 i += 1
764
764
765 r.append(l)
765 r.append(l)
766
766
767 return r
767 return r
768
768
    def findincoming(self, remote, base=None, heads=None):
        """Find changesets the remote has that we do not.

        remote is a peer repository object; heads optionally restricts
        the remote heads examined (defaults to remote.heads()).  base,
        if supplied as a dict, is filled in with nodes known to both
        sides (it is mutated in place -- findoutgoing relies on this).

        Returns the list of root nodes of the missing changesets, or
        None when the remote has nothing we lack.
        """
        m = self.changelog.nodemap
        search = []             # branch ranges scheduled for binary search
        fetch = {}              # earliest unknown node of each branch
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        # split remote heads into those we already know and true unknowns
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already local: nothing incoming
            return None

        rep = {}                # nodes already requested from the remote
        reqcnt = 0              # round-trip counter, reported in debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the branch root itself is
                            # the earliest missing changeset
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unknown parents for the next request batch
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10 to bound request size
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # remote.between returns nodes at power-of-two distances;
            # f tracks the gap size between consecutive samples
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of one: p is the exact earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # still a gap: narrow the range and search again
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # only the null node in common: histories are unrelated
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
890
890
891 def findoutgoing(self, remote, base=None, heads=None):
891 def findoutgoing(self, remote, base=None, heads=None):
892 if base == None:
892 if base == None:
893 base = {}
893 base = {}
894 self.findincoming(remote, base, heads)
894 self.findincoming(remote, base, heads)
895
895
896 self.ui.debug(_("common changesets up to ")
896 self.ui.debug(_("common changesets up to ")
897 + " ".join(map(short, base.keys())) + "\n")
897 + " ".join(map(short, base.keys())) + "\n")
898
898
899 remain = dict.fromkeys(self.changelog.nodemap)
899 remain = dict.fromkeys(self.changelog.nodemap)
900
900
901 # prune everything remote has from the tree
901 # prune everything remote has from the tree
902 del remain[nullid]
902 del remain[nullid]
903 remove = base.keys()
903 remove = base.keys()
904 while remove:
904 while remove:
905 n = remove.pop(0)
905 n = remove.pop(0)
906 if n in remain:
906 if n in remain:
907 del remain[n]
907 del remain[n]
908 for p in self.changelog.parents(n):
908 for p in self.changelog.parents(n):
909 remove.append(p)
909 remove.append(p)
910
910
911 # find every node whose parents have been pruned
911 # find every node whose parents have been pruned
912 subset = []
912 subset = []
913 for n in remain:
913 for n in remain:
914 p1, p2 = self.changelog.parents(n)
914 p1, p2 = self.changelog.parents(n)
915 if p1 not in remain and p2 not in remain:
915 if p1 not in remain and p2 not in remain:
916 subset.append(n)
916 subset.append(n)
917
917
918 # this is the set of all roots we have to push
918 # this is the set of all roots we have to push
919 return subset
919 return subset
920
920
921 def pull(self, remote, heads=None):
921 def pull(self, remote, heads=None):
922 lock = self.lock()
922 lock = self.lock()
923
923
924 # if we have an empty repo, fetch everything
924 # if we have an empty repo, fetch everything
925 if self.changelog.tip() == nullid:
925 if self.changelog.tip() == nullid:
926 self.ui.status(_("requesting all changes\n"))
926 self.ui.status(_("requesting all changes\n"))
927 fetch = [nullid]
927 fetch = [nullid]
928 else:
928 else:
929 fetch = self.findincoming(remote)
929 fetch = self.findincoming(remote)
930
930
931 if not fetch:
931 if not fetch:
932 self.ui.status(_("no changes found\n"))
932 self.ui.status(_("no changes found\n"))
933 return 1
933 return 1
934
934
935 if heads is None:
935 if heads is None:
936 cg = remote.changegroup(fetch)
936 cg = remote.changegroup(fetch)
937 else:
937 else:
938 cg = remote.changegroupsubset(fetch, heads)
938 cg = remote.changegroupsubset(fetch, heads)
939 return self.addchangegroup(cg)
939 return self.addchangegroup(cg)
940
940
941 def push(self, remote, force=False):
941 def push(self, remote, force=False):
942 lock = remote.lock()
942 lock = remote.lock()
943
943
944 base = {}
944 base = {}
945 heads = remote.heads()
945 heads = remote.heads()
946 inc = self.findincoming(remote, base, heads)
946 inc = self.findincoming(remote, base, heads)
947 if not force and inc:
947 if not force and inc:
948 self.ui.warn(_("abort: unsynced remote changes!\n"))
948 self.ui.warn(_("abort: unsynced remote changes!\n"))
949 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
949 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
950 return 1
950 return 1
951
951
952 update = self.findoutgoing(remote, base)
952 update = self.findoutgoing(remote, base)
953 if not update:
953 if not update:
954 self.ui.status(_("no changes found\n"))
954 self.ui.status(_("no changes found\n"))
955 return 1
955 return 1
956 elif not force:
956 elif not force:
957 if len(heads) < len(self.changelog.heads()):
957 if len(heads) < len(self.changelog.heads()):
958 self.ui.warn(_("abort: push creates new remote branches!\n"))
958 self.ui.warn(_("abort: push creates new remote branches!\n"))
959 self.ui.status(_("(did you forget to merge?"
959 self.ui.status(_("(did you forget to merge?"
960 " use push -f to force)\n"))
960 " use push -f to force)\n"))
961 return 1
961 return 1
962
962
963 cg = self.changegroup(update)
963 cg = self.changegroup(update)
964 return remote.addchangegroup(cg)
964 return remote.addchangegroup(cg)
965
965
966 def changegroupsubset(self, bases, heads):
966 def changegroupsubset(self, bases, heads):
967 """This function generates a changegroup consisting of all the nodes
967 """This function generates a changegroup consisting of all the nodes
968 that are descendents of any of the bases, and ancestors of any of
968 that are descendents of any of the bases, and ancestors of any of
969 the heads.
969 the heads.
970
970
971 It is fairly complex as determining which filenodes and which
971 It is fairly complex as determining which filenodes and which
972 manifest nodes need to be included for the changeset to be complete
972 manifest nodes need to be included for the changeset to be complete
973 is non-trivial.
973 is non-trivial.
974
974
975 Another wrinkle is doing the reverse, figuring out which changeset in
975 Another wrinkle is doing the reverse, figuring out which changeset in
976 the changegroup a particular filenode or manifestnode belongs to."""
976 the changegroup a particular filenode or manifestnode belongs to."""
977
977
978 # Set up some initial variables
978 # Set up some initial variables
979 # Make it easy to refer to self.changelog
979 # Make it easy to refer to self.changelog
980 cl = self.changelog
980 cl = self.changelog
981 # msng is short for missing - compute the list of changesets in this
981 # msng is short for missing - compute the list of changesets in this
982 # changegroup.
982 # changegroup.
983 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
983 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
984 # Some bases may turn out to be superfluous, and some heads may be
984 # Some bases may turn out to be superfluous, and some heads may be
985 # too. nodesbetween will return the minimal set of bases and heads
985 # too. nodesbetween will return the minimal set of bases and heads
986 # necessary to re-create the changegroup.
986 # necessary to re-create the changegroup.
987
987
988 # Known heads are the list of heads that it is assumed the recipient
988 # Known heads are the list of heads that it is assumed the recipient
989 # of this changegroup will know about.
989 # of this changegroup will know about.
990 knownheads = {}
990 knownheads = {}
991 # We assume that all parents of bases are known heads.
991 # We assume that all parents of bases are known heads.
992 for n in bases:
992 for n in bases:
993 for p in cl.parents(n):
993 for p in cl.parents(n):
994 if p != nullid:
994 if p != nullid:
995 knownheads[p] = 1
995 knownheads[p] = 1
996 knownheads = knownheads.keys()
996 knownheads = knownheads.keys()
997 if knownheads:
997 if knownheads:
998 # Now that we know what heads are known, we can compute which
998 # Now that we know what heads are known, we can compute which
999 # changesets are known. The recipient must know about all
999 # changesets are known. The recipient must know about all
1000 # changesets required to reach the known heads from the null
1000 # changesets required to reach the known heads from the null
1001 # changeset.
1001 # changeset.
1002 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1002 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1003 junk = None
1003 junk = None
1004 # Transform the list into an ersatz set.
1004 # Transform the list into an ersatz set.
1005 has_cl_set = dict.fromkeys(has_cl_set)
1005 has_cl_set = dict.fromkeys(has_cl_set)
1006 else:
1006 else:
1007 # If there were no known heads, the recipient cannot be assumed to
1007 # If there were no known heads, the recipient cannot be assumed to
1008 # know about any changesets.
1008 # know about any changesets.
1009 has_cl_set = {}
1009 has_cl_set = {}
1010
1010
1011 # Make it easy to refer to self.manifest
1011 # Make it easy to refer to self.manifest
1012 mnfst = self.manifest
1012 mnfst = self.manifest
1013 # We don't know which manifests are missing yet
1013 # We don't know which manifests are missing yet
1014 msng_mnfst_set = {}
1014 msng_mnfst_set = {}
1015 # Nor do we know which filenodes are missing.
1015 # Nor do we know which filenodes are missing.
1016 msng_filenode_set = {}
1016 msng_filenode_set = {}
1017
1017
1018 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1018 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1019 junk = None
1019 junk = None
1020
1020
1021 # A changeset always belongs to itself, so the changenode lookup
1021 # A changeset always belongs to itself, so the changenode lookup
1022 # function for a changenode is identity.
1022 # function for a changenode is identity.
1023 def identity(x):
1023 def identity(x):
1024 return x
1024 return x
1025
1025
1026 # A function generating function. Sets up an environment for the
1026 # A function generating function. Sets up an environment for the
1027 # inner function.
1027 # inner function.
1028 def cmp_by_rev_func(revlog):
1028 def cmp_by_rev_func(revlog):
1029 # Compare two nodes by their revision number in the environment's
1029 # Compare two nodes by their revision number in the environment's
1030 # revision history. Since the revision number both represents the
1030 # revision history. Since the revision number both represents the
1031 # most efficient order to read the nodes in, and represents a
1031 # most efficient order to read the nodes in, and represents a
1032 # topological sorting of the nodes, this function is often useful.
1032 # topological sorting of the nodes, this function is often useful.
1033 def cmp_by_rev(a, b):
1033 def cmp_by_rev(a, b):
1034 return cmp(revlog.rev(a), revlog.rev(b))
1034 return cmp(revlog.rev(a), revlog.rev(b))
1035 return cmp_by_rev
1035 return cmp_by_rev
1036
1036
1037 # If we determine that a particular file or manifest node must be a
1037 # If we determine that a particular file or manifest node must be a
1038 # node that the recipient of the changegroup will already have, we can
1038 # node that the recipient of the changegroup will already have, we can
1039 # also assume the recipient will have all the parents. This function
1039 # also assume the recipient will have all the parents. This function
1040 # prunes them from the set of missing nodes.
1040 # prunes them from the set of missing nodes.
1041 def prune_parents(revlog, hasset, msngset):
1041 def prune_parents(revlog, hasset, msngset):
1042 haslst = hasset.keys()
1042 haslst = hasset.keys()
1043 haslst.sort(cmp_by_rev_func(revlog))
1043 haslst.sort(cmp_by_rev_func(revlog))
1044 for node in haslst:
1044 for node in haslst:
1045 parentlst = [p for p in revlog.parents(node) if p != nullid]
1045 parentlst = [p for p in revlog.parents(node) if p != nullid]
1046 while parentlst:
1046 while parentlst:
1047 n = parentlst.pop()
1047 n = parentlst.pop()
1048 if n not in hasset:
1048 if n not in hasset:
1049 hasset[n] = 1
1049 hasset[n] = 1
1050 p = [p for p in revlog.parents(n) if p != nullid]
1050 p = [p for p in revlog.parents(n) if p != nullid]
1051 parentlst.extend(p)
1051 parentlst.extend(p)
1052 for n in hasset:
1052 for n in hasset:
1053 msngset.pop(n, None)
1053 msngset.pop(n, None)
1054
1054
1055 # This is a function generating function used to set up an environment
1055 # This is a function generating function used to set up an environment
1056 # for the inner function to execute in.
1056 # for the inner function to execute in.
1057 def manifest_and_file_collector(changedfileset):
1057 def manifest_and_file_collector(changedfileset):
1058 # This is an information gathering function that gathers
1058 # This is an information gathering function that gathers
1059 # information from each changeset node that goes out as part of
1059 # information from each changeset node that goes out as part of
1060 # the changegroup. The information gathered is a list of which
1060 # the changegroup. The information gathered is a list of which
1061 # manifest nodes are potentially required (the recipient may
1061 # manifest nodes are potentially required (the recipient may
1062 # already have them) and total list of all files which were
1062 # already have them) and total list of all files which were
1063 # changed in any changeset in the changegroup.
1063 # changed in any changeset in the changegroup.
1064 #
1064 #
1065 # We also remember the first changenode we saw any manifest
1065 # We also remember the first changenode we saw any manifest
1066 # referenced by so we can later determine which changenode 'owns'
1066 # referenced by so we can later determine which changenode 'owns'
1067 # the manifest.
1067 # the manifest.
1068 def collect_manifests_and_files(clnode):
1068 def collect_manifests_and_files(clnode):
1069 c = cl.read(clnode)
1069 c = cl.read(clnode)
1070 for f in c[3]:
1070 for f in c[3]:
1071 # This is to make sure we only have one instance of each
1071 # This is to make sure we only have one instance of each
1072 # filename string for each filename.
1072 # filename string for each filename.
1073 changedfileset.setdefault(f, f)
1073 changedfileset.setdefault(f, f)
1074 msng_mnfst_set.setdefault(c[0], clnode)
1074 msng_mnfst_set.setdefault(c[0], clnode)
1075 return collect_manifests_and_files
1075 return collect_manifests_and_files
1076
1076
1077 # Figure out which manifest nodes (of the ones we think might be part
1077 # Figure out which manifest nodes (of the ones we think might be part
1078 # of the changegroup) the recipient must know about and remove them
1078 # of the changegroup) the recipient must know about and remove them
1079 # from the changegroup.
1079 # from the changegroup.
1080 def prune_manifests():
1080 def prune_manifests():
1081 has_mnfst_set = {}
1081 has_mnfst_set = {}
1082 for n in msng_mnfst_set:
1082 for n in msng_mnfst_set:
1083 # If a 'missing' manifest thinks it belongs to a changenode
1083 # If a 'missing' manifest thinks it belongs to a changenode
1084 # the recipient is assumed to have, obviously the recipient
1084 # the recipient is assumed to have, obviously the recipient
1085 # must have that manifest.
1085 # must have that manifest.
1086 linknode = cl.node(mnfst.linkrev(n))
1086 linknode = cl.node(mnfst.linkrev(n))
1087 if linknode in has_cl_set:
1087 if linknode in has_cl_set:
1088 has_mnfst_set[n] = 1
1088 has_mnfst_set[n] = 1
1089 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1089 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1090
1090
1091 # Use the information collected in collect_manifests_and_files to say
1091 # Use the information collected in collect_manifests_and_files to say
1092 # which changenode any manifestnode belongs to.
1092 # which changenode any manifestnode belongs to.
1093 def lookup_manifest_link(mnfstnode):
1093 def lookup_manifest_link(mnfstnode):
1094 return msng_mnfst_set[mnfstnode]
1094 return msng_mnfst_set[mnfstnode]
1095
1095
1096 # A function generating function that sets up the initial environment
1096 # A function generating function that sets up the initial environment
1097 # the inner function.
1097 # the inner function.
1098 def filenode_collector(changedfiles):
1098 def filenode_collector(changedfiles):
1099 next_rev = [0]
1099 next_rev = [0]
1100 # This gathers information from each manifestnode included in the
1100 # This gathers information from each manifestnode included in the
1101 # changegroup about which filenodes the manifest node references
1101 # changegroup about which filenodes the manifest node references
1102 # so we can include those in the changegroup too.
1102 # so we can include those in the changegroup too.
1103 #
1103 #
1104 # It also remembers which changenode each filenode belongs to. It
1104 # It also remembers which changenode each filenode belongs to. It
1105 # does this by assuming the a filenode belongs to the changenode
1105 # does this by assuming the a filenode belongs to the changenode
1106 # the first manifest that references it belongs to.
1106 # the first manifest that references it belongs to.
1107 def collect_msng_filenodes(mnfstnode):
1107 def collect_msng_filenodes(mnfstnode):
1108 r = mnfst.rev(mnfstnode)
1108 r = mnfst.rev(mnfstnode)
1109 if r == next_rev[0]:
1109 if r == next_rev[0]:
1110 # If the last rev we looked at was the one just previous,
1110 # If the last rev we looked at was the one just previous,
1111 # we only need to see a diff.
1111 # we only need to see a diff.
1112 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1112 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1113 # For each line in the delta
1113 # For each line in the delta
1114 for dline in delta.splitlines():
1114 for dline in delta.splitlines():
1115 # get the filename and filenode for that line
1115 # get the filename and filenode for that line
1116 f, fnode = dline.split('\0')
1116 f, fnode = dline.split('\0')
1117 fnode = bin(fnode[:40])
1117 fnode = bin(fnode[:40])
1118 f = changedfiles.get(f, None)
1118 f = changedfiles.get(f, None)
1119 # And if the file is in the list of files we care
1119 # And if the file is in the list of files we care
1120 # about.
1120 # about.
1121 if f is not None:
1121 if f is not None:
1122 # Get the changenode this manifest belongs to
1122 # Get the changenode this manifest belongs to
1123 clnode = msng_mnfst_set[mnfstnode]
1123 clnode = msng_mnfst_set[mnfstnode]
1124 # Create the set of filenodes for the file if
1124 # Create the set of filenodes for the file if
1125 # there isn't one already.
1125 # there isn't one already.
1126 ndset = msng_filenode_set.setdefault(f, {})
1126 ndset = msng_filenode_set.setdefault(f, {})
1127 # And set the filenode's changelog node to the
1127 # And set the filenode's changelog node to the
1128 # manifest's if it hasn't been set already.
1128 # manifest's if it hasn't been set already.
1129 ndset.setdefault(fnode, clnode)
1129 ndset.setdefault(fnode, clnode)
1130 else:
1130 else:
1131 # Otherwise we need a full manifest.
1131 # Otherwise we need a full manifest.
1132 m = mnfst.read(mnfstnode)
1132 m = mnfst.read(mnfstnode)
1133 # For every file in we care about.
1133 # For every file in we care about.
1134 for f in changedfiles:
1134 for f in changedfiles:
1135 fnode = m.get(f, None)
1135 fnode = m.get(f, None)
1136 # If it's in the manifest
1136 # If it's in the manifest
1137 if fnode is not None:
1137 if fnode is not None:
1138 # See comments above.
1138 # See comments above.
1139 clnode = msng_mnfst_set[mnfstnode]
1139 clnode = msng_mnfst_set[mnfstnode]
1140 ndset = msng_filenode_set.setdefault(f, {})
1140 ndset = msng_filenode_set.setdefault(f, {})
1141 ndset.setdefault(fnode, clnode)
1141 ndset.setdefault(fnode, clnode)
1142 # Remember the revision we hope to see next.
1142 # Remember the revision we hope to see next.
1143 next_rev[0] = r + 1
1143 next_rev[0] = r + 1
1144 return collect_msng_filenodes
1144 return collect_msng_filenodes
1145
1145
1146 # We have a list of filenodes we think we need for a file, lets remove
1146 # We have a list of filenodes we think we need for a file, lets remove
1147 # all those we now the recipient must have.
1147 # all those we now the recipient must have.
1148 def prune_filenodes(f, filerevlog):
1148 def prune_filenodes(f, filerevlog):
1149 msngset = msng_filenode_set[f]
1149 msngset = msng_filenode_set[f]
1150 hasset = {}
1150 hasset = {}
1151 # If a 'missing' filenode thinks it belongs to a changenode we
1151 # If a 'missing' filenode thinks it belongs to a changenode we
1152 # assume the recipient must have, then the recipient must have
1152 # assume the recipient must have, then the recipient must have
1153 # that filenode.
1153 # that filenode.
1154 for n in msngset:
1154 for n in msngset:
1155 clnode = cl.node(filerevlog.linkrev(n))
1155 clnode = cl.node(filerevlog.linkrev(n))
1156 if clnode in has_cl_set:
1156 if clnode in has_cl_set:
1157 hasset[n] = 1
1157 hasset[n] = 1
1158 prune_parents(filerevlog, hasset, msngset)
1158 prune_parents(filerevlog, hasset, msngset)
1159
1159
1160 # A function generator function that sets up the a context for the
1160 # A function generator function that sets up the a context for the
1161 # inner function.
1161 # inner function.
1162 def lookup_filenode_link_func(fname):
1162 def lookup_filenode_link_func(fname):
1163 msngset = msng_filenode_set[fname]
1163 msngset = msng_filenode_set[fname]
1164 # Lookup the changenode the filenode belongs to.
1164 # Lookup the changenode the filenode belongs to.
1165 def lookup_filenode_link(fnode):
1165 def lookup_filenode_link(fnode):
1166 return msngset[fnode]
1166 return msngset[fnode]
1167 return lookup_filenode_link
1167 return lookup_filenode_link
1168
1168
1169 # Now that we have all theses utility functions to help out and
1169 # Now that we have all theses utility functions to help out and
1170 # logically divide up the task, generate the group.
1170 # logically divide up the task, generate the group.
1171 def gengroup():
1171 def gengroup():
1172 # The set of changed files starts empty.
1172 # The set of changed files starts empty.
1173 changedfiles = {}
1173 changedfiles = {}
1174 # Create a changenode group generator that will call our functions
1174 # Create a changenode group generator that will call our functions
1175 # back to lookup the owning changenode and collect information.
1175 # back to lookup the owning changenode and collect information.
1176 group = cl.group(msng_cl_lst, identity,
1176 group = cl.group(msng_cl_lst, identity,
1177 manifest_and_file_collector(changedfiles))
1177 manifest_and_file_collector(changedfiles))
1178 for chnk in group:
1178 for chnk in group:
1179 yield chnk
1179 yield chnk
1180
1180
1181 # The list of manifests has been collected by the generator
1181 # The list of manifests has been collected by the generator
1182 # calling our functions back.
1182 # calling our functions back.
1183 prune_manifests()
1183 prune_manifests()
1184 msng_mnfst_lst = msng_mnfst_set.keys()
1184 msng_mnfst_lst = msng_mnfst_set.keys()
1185 # Sort the manifestnodes by revision number.
1185 # Sort the manifestnodes by revision number.
1186 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1186 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1187 # Create a generator for the manifestnodes that calls our lookup
1187 # Create a generator for the manifestnodes that calls our lookup
1188 # and data collection functions back.
1188 # and data collection functions back.
1189 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1189 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1190 filenode_collector(changedfiles))
1190 filenode_collector(changedfiles))
1191 for chnk in group:
1191 for chnk in group:
1192 yield chnk
1192 yield chnk
1193
1193
1194 # These are no longer needed, dereference and toss the memory for
1194 # These are no longer needed, dereference and toss the memory for
1195 # them.
1195 # them.
1196 msng_mnfst_lst = None
1196 msng_mnfst_lst = None
1197 msng_mnfst_set.clear()
1197 msng_mnfst_set.clear()
1198
1198
1199 changedfiles = changedfiles.keys()
1199 changedfiles = changedfiles.keys()
1200 changedfiles.sort()
1200 changedfiles.sort()
1201 # Go through all our files in order sorted by name.
1201 # Go through all our files in order sorted by name.
1202 for fname in changedfiles:
1202 for fname in changedfiles:
1203 filerevlog = self.file(fname)
1203 filerevlog = self.file(fname)
1204 # Toss out the filenodes that the recipient isn't really
1204 # Toss out the filenodes that the recipient isn't really
1205 # missing.
1205 # missing.
1206 if msng_filenode_set.has_key(fname):
1206 if msng_filenode_set.has_key(fname):
1207 prune_filenodes(fname, filerevlog)
1207 prune_filenodes(fname, filerevlog)
1208 msng_filenode_lst = msng_filenode_set[fname].keys()
1208 msng_filenode_lst = msng_filenode_set[fname].keys()
1209 else:
1209 else:
1210 msng_filenode_lst = []
1210 msng_filenode_lst = []
1211 # If any filenodes are left, generate the group for them,
1211 # If any filenodes are left, generate the group for them,
1212 # otherwise don't bother.
1212 # otherwise don't bother.
1213 if len(msng_filenode_lst) > 0:
1213 if len(msng_filenode_lst) > 0:
1214 yield struct.pack(">l", len(fname) + 4) + fname
1214 yield struct.pack(">l", len(fname) + 4) + fname
1215 # Sort the filenodes by their revision #
1215 # Sort the filenodes by their revision #
1216 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1216 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1217 # Create a group generator and only pass in a changenode
1217 # Create a group generator and only pass in a changenode
1218 # lookup function as we need to collect no information
1218 # lookup function as we need to collect no information
1219 # from filenodes.
1219 # from filenodes.
1220 group = filerevlog.group(msng_filenode_lst,
1220 group = filerevlog.group(msng_filenode_lst,
1221 lookup_filenode_link_func(fname))
1221 lookup_filenode_link_func(fname))
1222 for chnk in group:
1222 for chnk in group:
1223 yield chnk
1223 yield chnk
1224 if msng_filenode_set.has_key(fname):
1224 if msng_filenode_set.has_key(fname):
1225 # Don't need this anymore, toss it to free memory.
1225 # Don't need this anymore, toss it to free memory.
1226 del msng_filenode_set[fname]
1226 del msng_filenode_set[fname]
1227 # Signal that no more groups are left.
1227 # Signal that no more groups are left.
1228 yield struct.pack(">l", 0)
1228 yield struct.pack(">l", 0)
1229
1229
1230 return util.chunkbuffer(gengroup())
1230 return util.chunkbuffer(gengroup())
1231
1231
1232 def changegroup(self, basenodes):
1232 def changegroup(self, basenodes):
1233 """Generate a changegroup of all nodes that we have that a recipient
1233 """Generate a changegroup of all nodes that we have that a recipient
1234 doesn't.
1234 doesn't.
1235
1235
1236 This is much easier than the previous function as we can assume that
1236 This is much easier than the previous function as we can assume that
1237 the recipient has any changenode we aren't sending them."""
1237 the recipient has any changenode we aren't sending them."""
1238 cl = self.changelog
1238 cl = self.changelog
1239 nodes = cl.nodesbetween(basenodes, None)[0]
1239 nodes = cl.nodesbetween(basenodes, None)[0]
1240 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1240 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1241
1241
1242 def identity(x):
1242 def identity(x):
1243 return x
1243 return x
1244
1244
1245 def gennodelst(revlog):
1245 def gennodelst(revlog):
1246 for r in xrange(0, revlog.count()):
1246 for r in xrange(0, revlog.count()):
1247 n = revlog.node(r)
1247 n = revlog.node(r)
1248 if revlog.linkrev(n) in revset:
1248 if revlog.linkrev(n) in revset:
1249 yield n
1249 yield n
1250
1250
1251 def changed_file_collector(changedfileset):
1251 def changed_file_collector(changedfileset):
1252 def collect_changed_files(clnode):
1252 def collect_changed_files(clnode):
1253 c = cl.read(clnode)
1253 c = cl.read(clnode)
1254 for fname in c[3]:
1254 for fname in c[3]:
1255 changedfileset[fname] = 1
1255 changedfileset[fname] = 1
1256 return collect_changed_files
1256 return collect_changed_files
1257
1257
1258 def lookuprevlink_func(revlog):
1258 def lookuprevlink_func(revlog):
1259 def lookuprevlink(n):
1259 def lookuprevlink(n):
1260 return cl.node(revlog.linkrev(n))
1260 return cl.node(revlog.linkrev(n))
1261 return lookuprevlink
1261 return lookuprevlink
1262
1262
1263 def gengroup():
1263 def gengroup():
1264 # construct a list of all changed files
1264 # construct a list of all changed files
1265 changedfiles = {}
1265 changedfiles = {}
1266
1266
1267 for chnk in cl.group(nodes, identity,
1267 for chnk in cl.group(nodes, identity,
1268 changed_file_collector(changedfiles)):
1268 changed_file_collector(changedfiles)):
1269 yield chnk
1269 yield chnk
1270 changedfiles = changedfiles.keys()
1270 changedfiles = changedfiles.keys()
1271 changedfiles.sort()
1271 changedfiles.sort()
1272
1272
1273 mnfst = self.manifest
1273 mnfst = self.manifest
1274 nodeiter = gennodelst(mnfst)
1274 nodeiter = gennodelst(mnfst)
1275 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1275 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1276 yield chnk
1276 yield chnk
1277
1277
1278 for fname in changedfiles:
1278 for fname in changedfiles:
1279 filerevlog = self.file(fname)
1279 filerevlog = self.file(fname)
1280 nodeiter = gennodelst(filerevlog)
1280 nodeiter = gennodelst(filerevlog)
1281 nodeiter = list(nodeiter)
1281 nodeiter = list(nodeiter)
1282 if nodeiter:
1282 if nodeiter:
1283 yield struct.pack(">l", len(fname) + 4) + fname
1283 yield struct.pack(">l", len(fname) + 4) + fname
1284 lookup = lookuprevlink_func(filerevlog)
1284 lookup = lookuprevlink_func(filerevlog)
1285 for chnk in filerevlog.group(nodeiter, lookup):
1285 for chnk in filerevlog.group(nodeiter, lookup):
1286 yield chnk
1286 yield chnk
1287
1287
1288 yield struct.pack(">l", 0)
1288 yield struct.pack(">l", 0)
1289
1289
1290 return util.chunkbuffer(gengroup())
1290 return util.chunkbuffer(gengroup())
1291
1291
1292 def addchangegroup(self, source):
1292 def addchangegroup(self, source):
1293
1293
1294 def getchunk():
1294 def getchunk():
1295 d = source.read(4)
1295 d = source.read(4)
1296 if not d:
1296 if not d:
1297 return ""
1297 return ""
1298 l = struct.unpack(">l", d)[0]
1298 l = struct.unpack(">l", d)[0]
1299 if l <= 4:
1299 if l <= 4:
1300 return ""
1300 return ""
1301 d = source.read(l - 4)
1301 d = source.read(l - 4)
1302 if len(d) < l - 4:
1302 if len(d) < l - 4:
1303 raise repo.RepoError(_("premature EOF reading chunk"
1303 raise repo.RepoError(_("premature EOF reading chunk"
1304 " (got %d bytes, expected %d)")
1304 " (got %d bytes, expected %d)")
1305 % (len(d), l - 4))
1305 % (len(d), l - 4))
1306 return d
1306 return d
1307
1307
1308 def getgroup():
1308 def getgroup():
1309 while 1:
1309 while 1:
1310 c = getchunk()
1310 c = getchunk()
1311 if not c:
1311 if not c:
1312 break
1312 break
1313 yield c
1313 yield c
1314
1314
1315 def csmap(x):
1315 def csmap(x):
1316 self.ui.debug(_("add changeset %s\n") % short(x))
1316 self.ui.debug(_("add changeset %s\n") % short(x))
1317 return self.changelog.count()
1317 return self.changelog.count()
1318
1318
1319 def revmap(x):
1319 def revmap(x):
1320 return self.changelog.rev(x)
1320 return self.changelog.rev(x)
1321
1321
1322 if not source:
1322 if not source:
1323 return
1323 return
1324 changesets = files = revisions = 0
1324 changesets = files = revisions = 0
1325
1325
1326 tr = self.transaction()
1326 tr = self.transaction()
1327
1327
1328 oldheads = len(self.changelog.heads())
1328 oldheads = len(self.changelog.heads())
1329
1329
1330 # pull off the changeset group
1330 # pull off the changeset group
1331 self.ui.status(_("adding changesets\n"))
1331 self.ui.status(_("adding changesets\n"))
1332 co = self.changelog.tip()
1332 co = self.changelog.tip()
1333 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1333 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1334 cnr, cor = map(self.changelog.rev, (cn, co))
1334 cnr, cor = map(self.changelog.rev, (cn, co))
1335 if cn == nullid:
1335 if cn == nullid:
1336 cnr = cor
1336 cnr = cor
1337 changesets = cnr - cor
1337 changesets = cnr - cor
1338
1338
1339 # pull off the manifest group
1339 # pull off the manifest group
1340 self.ui.status(_("adding manifests\n"))
1340 self.ui.status(_("adding manifests\n"))
1341 mm = self.manifest.tip()
1341 mm = self.manifest.tip()
1342 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1342 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1343
1343
1344 # process the files
1344 # process the files
1345 self.ui.status(_("adding file changes\n"))
1345 self.ui.status(_("adding file changes\n"))
1346 while 1:
1346 while 1:
1347 f = getchunk()
1347 f = getchunk()
1348 if not f:
1348 if not f:
1349 break
1349 break
1350 self.ui.debug(_("adding %s revisions\n") % f)
1350 self.ui.debug(_("adding %s revisions\n") % f)
1351 fl = self.file(f)
1351 fl = self.file(f)
1352 o = fl.count()
1352 o = fl.count()
1353 n = fl.addgroup(getgroup(), revmap, tr)
1353 n = fl.addgroup(getgroup(), revmap, tr)
1354 revisions += fl.count() - o
1354 revisions += fl.count() - o
1355 files += 1
1355 files += 1
1356
1356
1357 newheads = len(self.changelog.heads())
1357 newheads = len(self.changelog.heads())
1358 heads = ""
1358 heads = ""
1359 if oldheads and newheads > oldheads:
1359 if oldheads and newheads > oldheads:
1360 heads = _(" (+%d heads)") % (newheads - oldheads)
1360 heads = _(" (+%d heads)") % (newheads - oldheads)
1361
1361
1362 self.ui.status(_("added %d changesets"
1362 self.ui.status(_("added %d changesets"
1363 " with %d changes to %d files%s\n")
1363 " with %d changes to %d files%s\n")
1364 % (changesets, revisions, files, heads))
1364 % (changesets, revisions, files, heads))
1365
1365
1366 tr.close()
1366 tr.close()
1367
1367
1368 if changesets > 0:
1368 if changesets > 0:
1369 if not self.hook("changegroup",
1369 if not self.hook("changegroup",
1370 node=hex(self.changelog.node(cor+1))):
1370 node=hex(self.changelog.node(cor+1))):
1371 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1371 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1372 return 1
1372 return 1
1373
1373
1374 for i in range(cor + 1, cnr + 1):
1374 for i in range(cor + 1, cnr + 1):
1375 self.hook("commit", node=hex(self.changelog.node(i)))
1375 self.hook("commit", node=hex(self.changelog.node(i)))
1376
1376
1377 return
1377 return
1378
1378
1379 def update(self, node, allow=False, force=False, choose=None,
1379 def update(self, node, allow=False, force=False, choose=None,
1380 moddirstate=True, forcemerge=False):
1380 moddirstate=True, forcemerge=False):
1381 pl = self.dirstate.parents()
1381 pl = self.dirstate.parents()
1382 if not force and pl[1] != nullid:
1382 if not force and pl[1] != nullid:
1383 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1383 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1384 return 1
1384 return 1
1385
1385
1386 err = False
1386 err = False
1387
1387
1388 p1, p2 = pl[0], node
1388 p1, p2 = pl[0], node
1389 pa = self.changelog.ancestor(p1, p2)
1389 pa = self.changelog.ancestor(p1, p2)
1390 m1n = self.changelog.read(p1)[0]
1390 m1n = self.changelog.read(p1)[0]
1391 m2n = self.changelog.read(p2)[0]
1391 m2n = self.changelog.read(p2)[0]
1392 man = self.manifest.ancestor(m1n, m2n)
1392 man = self.manifest.ancestor(m1n, m2n)
1393 m1 = self.manifest.read(m1n)
1393 m1 = self.manifest.read(m1n)
1394 mf1 = self.manifest.readflags(m1n)
1394 mf1 = self.manifest.readflags(m1n)
1395 m2 = self.manifest.read(m2n).copy()
1395 m2 = self.manifest.read(m2n).copy()
1396 mf2 = self.manifest.readflags(m2n)
1396 mf2 = self.manifest.readflags(m2n)
1397 ma = self.manifest.read(man)
1397 ma = self.manifest.read(man)
1398 mfa = self.manifest.readflags(man)
1398 mfa = self.manifest.readflags(man)
1399
1399
1400 modified, added, removed, deleted, unknown = self.changes()
1400 modified, added, removed, deleted, unknown = self.changes()
1401
1401
1402 if allow and not forcemerge:
1402 if allow and not forcemerge:
1403 if modified or added or removed:
1403 if modified or added or removed:
1404 raise util.Abort(_("outstanding uncommited changes"))
1404 raise util.Abort(_("outstanding uncommited changes"))
1405 if not forcemerge and not force:
1405 if not forcemerge and not force:
1406 for f in unknown:
1406 for f in unknown:
1407 if f in m2:
1407 if f in m2:
1408 t1 = self.wread(f)
1408 t1 = self.wread(f)
1409 t2 = self.file(f).read(m2[f])
1409 t2 = self.file(f).read(m2[f])
1410 if cmp(t1, t2) != 0:
1410 if cmp(t1, t2) != 0:
1411 raise util.Abort(_("'%s' already exists in the working"
1411 raise util.Abort(_("'%s' already exists in the working"
1412 " dir and differs from remote") % f)
1412 " dir and differs from remote") % f)
1413
1413
1414 # is this a jump, or a merge? i.e. is there a linear path
1414 # is this a jump, or a merge? i.e. is there a linear path
1415 # from p1 to p2?
1415 # from p1 to p2?
1416 linear_path = (pa == p1 or pa == p2)
1416 linear_path = (pa == p1 or pa == p2)
1417
1417
1418 # resolve the manifest to determine which files
1418 # resolve the manifest to determine which files
1419 # we care about merging
1419 # we care about merging
1420 self.ui.note(_("resolving manifests\n"))
1420 self.ui.note(_("resolving manifests\n"))
1421 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1421 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1422 (force, allow, moddirstate, linear_path))
1422 (force, allow, moddirstate, linear_path))
1423 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1423 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1424 (short(man), short(m1n), short(m2n)))
1424 (short(man), short(m1n), short(m2n)))
1425
1425
1426 merge = {}
1426 merge = {}
1427 get = {}
1427 get = {}
1428 remove = []
1428 remove = []
1429
1429
1430 # construct a working dir manifest
1430 # construct a working dir manifest
1431 mw = m1.copy()
1431 mw = m1.copy()
1432 mfw = mf1.copy()
1432 mfw = mf1.copy()
1433 umap = dict.fromkeys(unknown)
1433 umap = dict.fromkeys(unknown)
1434
1434
1435 for f in added + modified + unknown:
1435 for f in added + modified + unknown:
1436 mw[f] = ""
1436 mw[f] = ""
1437 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1437 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1438
1438
1439 if moddirstate:
1439 if moddirstate:
1440 wlock = self.wlock()
1440 wlock = self.wlock()
1441
1441
1442 for f in deleted + removed:
1442 for f in deleted + removed:
1443 if f in mw:
1443 if f in mw:
1444 del mw[f]
1444 del mw[f]
1445
1445
1446 # If we're jumping between revisions (as opposed to merging),
1446 # If we're jumping between revisions (as opposed to merging),
1447 # and if neither the working directory nor the target rev has
1447 # and if neither the working directory nor the target rev has
1448 # the file, then we need to remove it from the dirstate, to
1448 # the file, then we need to remove it from the dirstate, to
1449 # prevent the dirstate from listing the file when it is no
1449 # prevent the dirstate from listing the file when it is no
1450 # longer in the manifest.
1450 # longer in the manifest.
1451 if moddirstate and linear_path and f not in m2:
1451 if moddirstate and linear_path and f not in m2:
1452 self.dirstate.forget((f,))
1452 self.dirstate.forget((f,))
1453
1453
1454 # Compare manifests
1454 # Compare manifests
1455 for f, n in mw.iteritems():
1455 for f, n in mw.iteritems():
1456 if choose and not choose(f):
1456 if choose and not choose(f):
1457 continue
1457 continue
1458 if f in m2:
1458 if f in m2:
1459 s = 0
1459 s = 0
1460
1460
1461 # is the wfile new since m1, and match m2?
1461 # is the wfile new since m1, and match m2?
1462 if f not in m1:
1462 if f not in m1:
1463 t1 = self.wread(f)
1463 t1 = self.wread(f)
1464 t2 = self.file(f).read(m2[f])
1464 t2 = self.file(f).read(m2[f])
1465 if cmp(t1, t2) == 0:
1465 if cmp(t1, t2) == 0:
1466 n = m2[f]
1466 n = m2[f]
1467 del t1, t2
1467 del t1, t2
1468
1468
1469 # are files different?
1469 # are files different?
1470 if n != m2[f]:
1470 if n != m2[f]:
1471 a = ma.get(f, nullid)
1471 a = ma.get(f, nullid)
1472 # are both different from the ancestor?
1472 # are both different from the ancestor?
1473 if n != a and m2[f] != a:
1473 if n != a and m2[f] != a:
1474 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1474 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1475 # merge executable bits
1475 # merge executable bits
1476 # "if we changed or they changed, change in merge"
1476 # "if we changed or they changed, change in merge"
1477 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1477 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1478 mode = ((a^b) | (a^c)) ^ a
1478 mode = ((a^b) | (a^c)) ^ a
1479 merge[f] = (m1.get(f, nullid), m2[f], mode)
1479 merge[f] = (m1.get(f, nullid), m2[f], mode)
1480 s = 1
1480 s = 1
1481 # are we clobbering?
1481 # are we clobbering?
1482 # is remote's version newer?
1482 # is remote's version newer?
1483 # or are we going back in time?
1483 # or are we going back in time?
1484 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1484 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1485 self.ui.debug(_(" remote %s is newer, get\n") % f)
1485 self.ui.debug(_(" remote %s is newer, get\n") % f)
1486 get[f] = m2[f]
1486 get[f] = m2[f]
1487 s = 1
1487 s = 1
1488 elif f in umap:
1488 elif f in umap:
1489 # this unknown file is the same as the checkout
1489 # this unknown file is the same as the checkout
1490 get[f] = m2[f]
1490 get[f] = m2[f]
1491
1491
1492 if not s and mfw[f] != mf2[f]:
1492 if not s and mfw[f] != mf2[f]:
1493 if force:
1493 if force:
1494 self.ui.debug(_(" updating permissions for %s\n") % f)
1494 self.ui.debug(_(" updating permissions for %s\n") % f)
1495 util.set_exec(self.wjoin(f), mf2[f])
1495 util.set_exec(self.wjoin(f), mf2[f])
1496 else:
1496 else:
1497 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1497 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1498 mode = ((a^b) | (a^c)) ^ a
1498 mode = ((a^b) | (a^c)) ^ a
1499 if mode != b:
1499 if mode != b:
1500 self.ui.debug(_(" updating permissions for %s\n")
1500 self.ui.debug(_(" updating permissions for %s\n")
1501 % f)
1501 % f)
1502 util.set_exec(self.wjoin(f), mode)
1502 util.set_exec(self.wjoin(f), mode)
1503 del m2[f]
1503 del m2[f]
1504 elif f in ma:
1504 elif f in ma:
1505 if n != ma[f]:
1505 if n != ma[f]:
1506 r = _("d")
1506 r = _("d")
1507 if not force and (linear_path or allow):
1507 if not force and (linear_path or allow):
1508 r = self.ui.prompt(
1508 r = self.ui.prompt(
1509 (_(" local changed %s which remote deleted\n") % f) +
1509 (_(" local changed %s which remote deleted\n") % f) +
1510 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1510 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1511 if r == _("d"):
1511 if r == _("d"):
1512 remove.append(f)
1512 remove.append(f)
1513 else:
1513 else:
1514 self.ui.debug(_("other deleted %s\n") % f)
1514 self.ui.debug(_("other deleted %s\n") % f)
1515 remove.append(f) # other deleted it
1515 remove.append(f) # other deleted it
1516 else:
1516 else:
1517 # file is created on branch or in working directory
1517 # file is created on branch or in working directory
1518 if force and f not in umap:
1518 if force and f not in umap:
1519 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1519 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1520 remove.append(f)
1520 remove.append(f)
1521 elif n == m1.get(f, nullid): # same as parent
1521 elif n == m1.get(f, nullid): # same as parent
1522 if p2 == pa: # going backwards?
1522 if p2 == pa: # going backwards?
1523 self.ui.debug(_("remote deleted %s\n") % f)
1523 self.ui.debug(_("remote deleted %s\n") % f)
1524 remove.append(f)
1524 remove.append(f)
1525 else:
1525 else:
1526 self.ui.debug(_("local modified %s, keeping\n") % f)
1526 self.ui.debug(_("local modified %s, keeping\n") % f)
1527 else:
1527 else:
1528 self.ui.debug(_("working dir created %s, keeping\n") % f)
1528 self.ui.debug(_("working dir created %s, keeping\n") % f)
1529
1529
1530 for f, n in m2.iteritems():
1530 for f, n in m2.iteritems():
1531 if choose and not choose(f):
1531 if choose and not choose(f):
1532 continue
1532 continue
1533 if f[0] == "/":
1533 if f[0] == "/":
1534 continue
1534 continue
1535 if f in ma and n != ma[f]:
1535 if f in ma and n != ma[f]:
1536 r = _("k")
1536 r = _("k")
1537 if not force and (linear_path or allow):
1537 if not force and (linear_path or allow):
1538 r = self.ui.prompt(
1538 r = self.ui.prompt(
1539 (_("remote changed %s which local deleted\n") % f) +
1539 (_("remote changed %s which local deleted\n") % f) +
1540 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1540 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1541 if r == _("k"):
1541 if r == _("k"):
1542 get[f] = n
1542 get[f] = n
1543 elif f not in ma:
1543 elif f not in ma:
1544 self.ui.debug(_("remote created %s\n") % f)
1544 self.ui.debug(_("remote created %s\n") % f)
1545 get[f] = n
1545 get[f] = n
1546 else:
1546 else:
1547 if force or p2 == pa: # going backwards?
1547 if force or p2 == pa: # going backwards?
1548 self.ui.debug(_("local deleted %s, recreating\n") % f)
1548 self.ui.debug(_("local deleted %s, recreating\n") % f)
1549 get[f] = n
1549 get[f] = n
1550 else:
1550 else:
1551 self.ui.debug(_("local deleted %s\n") % f)
1551 self.ui.debug(_("local deleted %s\n") % f)
1552
1552
1553 del mw, m1, m2, ma
1553 del mw, m1, m2, ma
1554
1554
1555 if force:
1555 if force:
1556 for f in merge:
1556 for f in merge:
1557 get[f] = merge[f][1]
1557 get[f] = merge[f][1]
1558 merge = {}
1558 merge = {}
1559
1559
1560 if linear_path or force:
1560 if linear_path or force:
1561 # we don't need to do any magic, just jump to the new rev
1561 # we don't need to do any magic, just jump to the new rev
1562 branch_merge = False
1562 branch_merge = False
1563 p1, p2 = p2, nullid
1563 p1, p2 = p2, nullid
1564 else:
1564 else:
1565 if not allow:
1565 if not allow:
1566 self.ui.status(_("this update spans a branch"
1566 self.ui.status(_("this update spans a branch"
1567 " affecting the following files:\n"))
1567 " affecting the following files:\n"))
1568 fl = merge.keys() + get.keys()
1568 fl = merge.keys() + get.keys()
1569 fl.sort()
1569 fl.sort()
1570 for f in fl:
1570 for f in fl:
1571 cf = ""
1571 cf = ""
1572 if f in merge:
1572 if f in merge:
1573 cf = _(" (resolve)")
1573 cf = _(" (resolve)")
1574 self.ui.status(" %s%s\n" % (f, cf))
1574 self.ui.status(" %s%s\n" % (f, cf))
1575 self.ui.warn(_("aborting update spanning branches!\n"))
1575 self.ui.warn(_("aborting update spanning branches!\n"))
1576 self.ui.status(_("(use update -m to merge across branches"
1576 self.ui.status(_("(use update -m to merge across branches"
1577 " or -C to lose changes)\n"))
1577 " or -C to lose changes)\n"))
1578 return 1
1578 return 1
1579 branch_merge = True
1579 branch_merge = True
1580
1580
1581 # get the files we don't need to change
1581 # get the files we don't need to change
1582 files = get.keys()
1582 files = get.keys()
1583 files.sort()
1583 files.sort()
1584 for f in files:
1584 for f in files:
1585 if f[0] == "/":
1585 if f[0] == "/":
1586 continue
1586 continue
1587 self.ui.note(_("getting %s\n") % f)
1587 self.ui.note(_("getting %s\n") % f)
1588 t = self.file(f).read(get[f])
1588 t = self.file(f).read(get[f])
1589 self.wwrite(f, t)
1589 self.wwrite(f, t)
1590 util.set_exec(self.wjoin(f), mf2[f])
1590 util.set_exec(self.wjoin(f), mf2[f])
1591 if moddirstate:
1591 if moddirstate:
1592 if branch_merge:
1592 if branch_merge:
1593 self.dirstate.update([f], 'n', st_mtime=-1)
1593 self.dirstate.update([f], 'n', st_mtime=-1)
1594 else:
1594 else:
1595 self.dirstate.update([f], 'n')
1595 self.dirstate.update([f], 'n')
1596
1596
1597 # merge the tricky bits
1597 # merge the tricky bits
1598 files = merge.keys()
1598 files = merge.keys()
1599 files.sort()
1599 files.sort()
1600 for f in files:
1600 for f in files:
1601 self.ui.status(_("merging %s\n") % f)
1601 self.ui.status(_("merging %s\n") % f)
1602 my, other, flag = merge[f]
1602 my, other, flag = merge[f]
1603 ret = self.merge3(f, my, other)
1603 ret = self.merge3(f, my, other)
1604 if ret:
1604 if ret:
1605 err = True
1605 err = True
1606 util.set_exec(self.wjoin(f), flag)
1606 util.set_exec(self.wjoin(f), flag)
1607 if moddirstate:
1607 if moddirstate:
1608 if branch_merge:
1608 if branch_merge:
1609 # We've done a branch merge, mark this file as merged
1609 # We've done a branch merge, mark this file as merged
1610 # so that we properly record the merger later
1610 # so that we properly record the merger later
1611 self.dirstate.update([f], 'm')
1611 self.dirstate.update([f], 'm')
1612 else:
1612 else:
1613 # We've update-merged a locally modified file, so
1613 # We've update-merged a locally modified file, so
1614 # we set the dirstate to emulate a normal checkout
1614 # we set the dirstate to emulate a normal checkout
1615 # of that file some time in the past. Thus our
1615 # of that file some time in the past. Thus our
1616 # merge will appear as a normal local file
1616 # merge will appear as a normal local file
1617 # modification.
1617 # modification.
1618 f_len = len(self.file(f).read(other))
1618 f_len = len(self.file(f).read(other))
1619 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1619 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1620
1620
1621 remove.sort()
1621 remove.sort()
1622 for f in remove:
1622 for f in remove:
1623 self.ui.note(_("removing %s\n") % f)
1623 self.ui.note(_("removing %s\n") % f)
1624 try:
1624 try:
1625 util.unlink(self.wjoin(f))
1625 util.unlink(self.wjoin(f))
1626 except OSError, inst:
1626 except OSError, inst:
1627 if inst.errno != errno.ENOENT:
1627 if inst.errno != errno.ENOENT:
1628 self.ui.warn(_("update failed to remove %s: %s!\n") %
1628 self.ui.warn(_("update failed to remove %s: %s!\n") %
1629 (f, inst.strerror))
1629 (f, inst.strerror))
1630 if moddirstate:
1630 if moddirstate:
1631 if branch_merge:
1631 if branch_merge:
1632 self.dirstate.update(remove, 'r')
1632 self.dirstate.update(remove, 'r')
1633 else:
1633 else:
1634 self.dirstate.forget(remove)
1634 self.dirstate.forget(remove)
1635
1635
1636 if moddirstate:
1636 if moddirstate:
1637 self.dirstate.setparents(p1, p2)
1637 self.dirstate.setparents(p1, p2)
1638 return err
1638 return err
1639
1639
1640 def merge3(self, fn, my, other):
1640 def merge3(self, fn, my, other):
1641 """perform a 3-way merge in the working directory"""
1641 """perform a 3-way merge in the working directory"""
1642
1642
1643 def temp(prefix, node):
1643 def temp(prefix, node):
1644 pre = "%s~%s." % (os.path.basename(fn), prefix)
1644 pre = "%s~%s." % (os.path.basename(fn), prefix)
1645 (fd, name) = tempfile.mkstemp("", pre)
1645 (fd, name) = tempfile.mkstemp("", pre)
1646 f = os.fdopen(fd, "wb")
1646 f = os.fdopen(fd, "wb")
1647 self.wwrite(fn, fl.read(node), f)
1647 self.wwrite(fn, fl.read(node), f)
1648 f.close()
1648 f.close()
1649 return name
1649 return name
1650
1650
1651 fl = self.file(fn)
1651 fl = self.file(fn)
1652 base = fl.ancestor(my, other)
1652 base = fl.ancestor(my, other)
1653 a = self.wjoin(fn)
1653 a = self.wjoin(fn)
1654 b = temp("base", base)
1654 b = temp("base", base)
1655 c = temp("other", other)
1655 c = temp("other", other)
1656
1656
1657 self.ui.note(_("resolving %s\n") % fn)
1657 self.ui.note(_("resolving %s\n") % fn)
1658 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1658 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1659 (fn, short(my), short(other), short(base)))
1659 (fn, short(my), short(other), short(base)))
1660
1660
1661 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1661 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1662 or "hgmerge")
1662 or "hgmerge")
1663 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1663 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1664 if r:
1664 if r:
1665 self.ui.warn(_("merging %s failed!\n") % fn)
1665 self.ui.warn(_("merging %s failed!\n") % fn)
1666
1666
1667 os.unlink(b)
1667 os.unlink(b)
1668 os.unlink(c)
1668 os.unlink(c)
1669 return r
1669 return r
1670
1670
1671 def verify(self):
1671 def verify(self):
1672 filelinkrevs = {}
1672 filelinkrevs = {}
1673 filenodes = {}
1673 filenodes = {}
1674 changesets = revisions = files = 0
1674 changesets = revisions = files = 0
1675 errors = [0]
1675 errors = [0]
1676 neededmanifests = {}
1676 neededmanifests = {}
1677
1677
1678 def err(msg):
1678 def err(msg):
1679 self.ui.warn(msg + "\n")
1679 self.ui.warn(msg + "\n")
1680 errors[0] += 1
1680 errors[0] += 1
1681
1681
1682 def checksize(obj, name):
1683 d = obj.checksize()
1684 if d[0]:
1685 err(_("%s data length off by %d bytes") % (name, d[0]))
1686 if d[1]:
1687 err(_("%s index contains %d extra bytes") % (name, d[1]))
1688
1682 seen = {}
1689 seen = {}
1683 self.ui.status(_("checking changesets\n"))
1690 self.ui.status(_("checking changesets\n"))
1684 d = self.changelog.checksize()
1691 checksize(self.changelog, "changelog")
1685 if d:
1692
1686 err(_("changeset data short %d bytes") % d)
1687 for i in range(self.changelog.count()):
1693 for i in range(self.changelog.count()):
1688 changesets += 1
1694 changesets += 1
1689 n = self.changelog.node(i)
1695 n = self.changelog.node(i)
1690 l = self.changelog.linkrev(n)
1696 l = self.changelog.linkrev(n)
1691 if l != i:
1697 if l != i:
1692 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1698 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1693 if n in seen:
1699 if n in seen:
1694 err(_("duplicate changeset at revision %d") % i)
1700 err(_("duplicate changeset at revision %d") % i)
1695 seen[n] = 1
1701 seen[n] = 1
1696
1702
1697 for p in self.changelog.parents(n):
1703 for p in self.changelog.parents(n):
1698 if p not in self.changelog.nodemap:
1704 if p not in self.changelog.nodemap:
1699 err(_("changeset %s has unknown parent %s") %
1705 err(_("changeset %s has unknown parent %s") %
1700 (short(n), short(p)))
1706 (short(n), short(p)))
1701 try:
1707 try:
1702 changes = self.changelog.read(n)
1708 changes = self.changelog.read(n)
1703 except KeyboardInterrupt:
1709 except KeyboardInterrupt:
1704 self.ui.warn(_("interrupted"))
1710 self.ui.warn(_("interrupted"))
1705 raise
1711 raise
1706 except Exception, inst:
1712 except Exception, inst:
1707 err(_("unpacking changeset %s: %s") % (short(n), inst))
1713 err(_("unpacking changeset %s: %s") % (short(n), inst))
1708
1714
1709 neededmanifests[changes[0]] = n
1715 neededmanifests[changes[0]] = n
1710
1716
1711 for f in changes[3]:
1717 for f in changes[3]:
1712 filelinkrevs.setdefault(f, []).append(i)
1718 filelinkrevs.setdefault(f, []).append(i)
1713
1719
1714 seen = {}
1720 seen = {}
1715 self.ui.status(_("checking manifests\n"))
1721 self.ui.status(_("checking manifests\n"))
1716 d = self.manifest.checksize()
1722 checksize(self.manifest, "manifest")
1717 if d:
1723
1718 err(_("manifest data short %d bytes") % d)
1719 for i in range(self.manifest.count()):
1724 for i in range(self.manifest.count()):
1720 n = self.manifest.node(i)
1725 n = self.manifest.node(i)
1721 l = self.manifest.linkrev(n)
1726 l = self.manifest.linkrev(n)
1722
1727
1723 if l < 0 or l >= self.changelog.count():
1728 if l < 0 or l >= self.changelog.count():
1724 err(_("bad manifest link (%d) at revision %d") % (l, i))
1729 err(_("bad manifest link (%d) at revision %d") % (l, i))
1725
1730
1726 if n in neededmanifests:
1731 if n in neededmanifests:
1727 del neededmanifests[n]
1732 del neededmanifests[n]
1728
1733
1729 if n in seen:
1734 if n in seen:
1730 err(_("duplicate manifest at revision %d") % i)
1735 err(_("duplicate manifest at revision %d") % i)
1731
1736
1732 seen[n] = 1
1737 seen[n] = 1
1733
1738
1734 for p in self.manifest.parents(n):
1739 for p in self.manifest.parents(n):
1735 if p not in self.manifest.nodemap:
1740 if p not in self.manifest.nodemap:
1736 err(_("manifest %s has unknown parent %s") %
1741 err(_("manifest %s has unknown parent %s") %
1737 (short(n), short(p)))
1742 (short(n), short(p)))
1738
1743
1739 try:
1744 try:
1740 delta = mdiff.patchtext(self.manifest.delta(n))
1745 delta = mdiff.patchtext(self.manifest.delta(n))
1741 except KeyboardInterrupt:
1746 except KeyboardInterrupt:
1742 self.ui.warn(_("interrupted"))
1747 self.ui.warn(_("interrupted"))
1743 raise
1748 raise
1744 except Exception, inst:
1749 except Exception, inst:
1745 err(_("unpacking manifest %s: %s") % (short(n), inst))
1750 err(_("unpacking manifest %s: %s") % (short(n), inst))
1746
1751
1747 ff = [ l.split('\0') for l in delta.splitlines() ]
1752 ff = [ l.split('\0') for l in delta.splitlines() ]
1748 for f, fn in ff:
1753 for f, fn in ff:
1749 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1754 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1750
1755
1751 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1756 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1752
1757
1753 for m, c in neededmanifests.items():
1758 for m, c in neededmanifests.items():
1754 err(_("Changeset %s refers to unknown manifest %s") %
1759 err(_("Changeset %s refers to unknown manifest %s") %
1755 (short(m), short(c)))
1760 (short(m), short(c)))
1756 del neededmanifests
1761 del neededmanifests
1757
1762
1758 for f in filenodes:
1763 for f in filenodes:
1759 if f not in filelinkrevs:
1764 if f not in filelinkrevs:
1760 err(_("file %s in manifest but not in changesets") % f)
1765 err(_("file %s in manifest but not in changesets") % f)
1761
1766
1762 for f in filelinkrevs:
1767 for f in filelinkrevs:
1763 if f not in filenodes:
1768 if f not in filenodes:
1764 err(_("file %s in changeset but not in manifest") % f)
1769 err(_("file %s in changeset but not in manifest") % f)
1765
1770
1766 self.ui.status(_("checking files\n"))
1771 self.ui.status(_("checking files\n"))
1767 ff = filenodes.keys()
1772 ff = filenodes.keys()
1768 ff.sort()
1773 ff.sort()
1769 for f in ff:
1774 for f in ff:
1770 if f == "/dev/null":
1775 if f == "/dev/null":
1771 continue
1776 continue
1772 files += 1
1777 files += 1
1773 fl = self.file(f)
1778 fl = self.file(f)
1774 d = fl.checksize()
1779 checksize(fl, f)
1775 if d:
1776 err(_("%s file data short %d bytes") % (f, d))
1777
1780
1778 nodes = {nullid: 1}
1781 nodes = {nullid: 1}
1779 seen = {}
1782 seen = {}
1780 for i in range(fl.count()):
1783 for i in range(fl.count()):
1781 revisions += 1
1784 revisions += 1
1782 n = fl.node(i)
1785 n = fl.node(i)
1783
1786
1784 if n in seen:
1787 if n in seen:
1785 err(_("%s: duplicate revision %d") % (f, i))
1788 err(_("%s: duplicate revision %d") % (f, i))
1786 if n not in filenodes[f]:
1789 if n not in filenodes[f]:
1787 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1790 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1788 else:
1791 else:
1789 del filenodes[f][n]
1792 del filenodes[f][n]
1790
1793
1791 flr = fl.linkrev(n)
1794 flr = fl.linkrev(n)
1792 if flr not in filelinkrevs[f]:
1795 if flr not in filelinkrevs[f]:
1793 err(_("%s:%s points to unexpected changeset %d")
1796 err(_("%s:%s points to unexpected changeset %d")
1794 % (f, short(n), flr))
1797 % (f, short(n), flr))
1795 else:
1798 else:
1796 filelinkrevs[f].remove(flr)
1799 filelinkrevs[f].remove(flr)
1797
1800
1798 # verify contents
1801 # verify contents
1799 try:
1802 try:
1800 t = fl.read(n)
1803 t = fl.read(n)
1801 except KeyboardInterrupt:
1804 except KeyboardInterrupt:
1802 self.ui.warn(_("interrupted"))
1805 self.ui.warn(_("interrupted"))
1803 raise
1806 raise
1804 except Exception, inst:
1807 except Exception, inst:
1805 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1808 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1806
1809
1807 # verify parents
1810 # verify parents
1808 (p1, p2) = fl.parents(n)
1811 (p1, p2) = fl.parents(n)
1809 if p1 not in nodes:
1812 if p1 not in nodes:
1810 err(_("file %s:%s unknown parent 1 %s") %
1813 err(_("file %s:%s unknown parent 1 %s") %
1811 (f, short(n), short(p1)))
1814 (f, short(n), short(p1)))
1812 if p2 not in nodes:
1815 if p2 not in nodes:
1813 err(_("file %s:%s unknown parent 2 %s") %
1816 err(_("file %s:%s unknown parent 2 %s") %
1814 (f, short(n), short(p1)))
1817 (f, short(n), short(p1)))
1815 nodes[n] = 1
1818 nodes[n] = 1
1816
1819
1817 # cross-check
1820 # cross-check
1818 for node in filenodes[f]:
1821 for node in filenodes[f]:
1819 err(_("node %s in manifests not in %s") % (hex(node), f))
1822 err(_("node %s in manifests not in %s") % (hex(node), f))
1820
1823
1821 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1824 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1822 (files, changesets, revisions))
1825 (files, changesets, revisions))
1823
1826
1824 if errors[0]:
1827 if errors[0]:
1825 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1828 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1826 return 1
1829 return 1
@@ -1,854 +1,869 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
16 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
17
17
18 def hash(text, p1, p2):
18 def hash(text, p1, p2):
19 """generate a hash from the given text and its parent hashes
19 """generate a hash from the given text and its parent hashes
20
20
21 This hash combines both the current file contents and its history
21 This hash combines both the current file contents and its history
22 in a manner that makes it easy to distinguish nodes with the same
22 in a manner that makes it easy to distinguish nodes with the same
23 content in the revision graph.
23 content in the revision graph.
24 """
24 """
25 l = [p1, p2]
25 l = [p1, p2]
26 l.sort()
26 l.sort()
27 s = sha.new(l[0])
27 s = sha.new(l[0])
28 s.update(l[1])
28 s.update(l[1])
29 s.update(text)
29 s.update(text)
30 return s.digest()
30 return s.digest()
31
31
32 def compress(text):
32 def compress(text):
33 """ generate a possibly-compressed representation of text """
33 """ generate a possibly-compressed representation of text """
34 if not text: return ("", text)
34 if not text: return ("", text)
35 if len(text) < 44:
35 if len(text) < 44:
36 if text[0] == '\0': return ("", text)
36 if text[0] == '\0': return ("", text)
37 return ('u', text)
37 return ('u', text)
38 bin = zlib.compress(text)
38 bin = zlib.compress(text)
39 if len(bin) > len(text):
39 if len(bin) > len(text):
40 if text[0] == '\0': return ("", text)
40 if text[0] == '\0': return ("", text)
41 return ('u', text)
41 return ('u', text)
42 return ("", bin)
42 return ("", bin)
43
43
44 def decompress(bin):
44 def decompress(bin):
45 """ decompress the given input """
45 """ decompress the given input """
46 if not bin: return bin
46 if not bin: return bin
47 t = bin[0]
47 t = bin[0]
48 if t == '\0': return bin
48 if t == '\0': return bin
49 if t == 'x': return zlib.decompress(bin)
49 if t == 'x': return zlib.decompress(bin)
50 if t == 'u': return bin[1:]
50 if t == 'u': return bin[1:]
51 raise RevlogError(_("unknown compression type %s") % t)
51 raise RevlogError(_("unknown compression type %s") % t)
52
52
53 indexformat = ">4l20s20s20s"
53 indexformat = ">4l20s20s20s"
54
54
55 class lazyparser(object):
55 class lazyparser(object):
56 """
56 """
57 this class avoids the need to parse the entirety of large indices
57 this class avoids the need to parse the entirety of large indices
58
58
59 By default we parse and load 1000 entries at a time.
59 By default we parse and load 1000 entries at a time.
60
60
61 If no position is specified, we load the whole index, and replace
61 If no position is specified, we load the whole index, and replace
62 the lazy objects in revlog with the underlying objects for
62 the lazy objects in revlog with the underlying objects for
63 efficiency in cases where we look at most of the nodes.
63 efficiency in cases where we look at most of the nodes.
64 """
64 """
65 def __init__(self, data, revlog):
65 def __init__(self, data, revlog):
66 self.data = data
66 self.data = data
67 self.s = struct.calcsize(indexformat)
67 self.s = struct.calcsize(indexformat)
68 self.l = len(data)/self.s
68 self.l = len(data)/self.s
69 self.index = [None] * self.l
69 self.index = [None] * self.l
70 self.map = {nullid: -1}
70 self.map = {nullid: -1}
71 self.all = 0
71 self.all = 0
72 self.revlog = revlog
72 self.revlog = revlog
73
73
74 def trunc(self, pos):
74 def trunc(self, pos):
75 self.l = pos/self.s
75 self.l = pos/self.s
76
76
77 def load(self, pos=None):
77 def load(self, pos=None):
78 if self.all: return
78 if self.all: return
79 if pos is not None:
79 if pos is not None:
80 block = pos / 1000
80 block = pos / 1000
81 i = block * 1000
81 i = block * 1000
82 end = min(self.l, i + 1000)
82 end = min(self.l, i + 1000)
83 else:
83 else:
84 self.all = 1
84 self.all = 1
85 i = 0
85 i = 0
86 end = self.l
86 end = self.l
87 self.revlog.index = self.index
87 self.revlog.index = self.index
88 self.revlog.nodemap = self.map
88 self.revlog.nodemap = self.map
89
89
90 while i < end:
90 while i < end:
91 d = self.data[i * self.s: (i + 1) * self.s]
91 d = self.data[i * self.s: (i + 1) * self.s]
92 e = struct.unpack(indexformat, d)
92 e = struct.unpack(indexformat, d)
93 self.index[i] = e
93 self.index[i] = e
94 self.map[e[6]] = i
94 self.map[e[6]] = i
95 i += 1
95 i += 1
96
96
97 class lazyindex(object):
97 class lazyindex(object):
98 """a lazy version of the index array"""
98 """a lazy version of the index array"""
99 def __init__(self, parser):
99 def __init__(self, parser):
100 self.p = parser
100 self.p = parser
101 def __len__(self):
101 def __len__(self):
102 return len(self.p.index)
102 return len(self.p.index)
103 def load(self, pos):
103 def load(self, pos):
104 if pos < 0:
104 if pos < 0:
105 pos += len(self.p.index)
105 pos += len(self.p.index)
106 self.p.load(pos)
106 self.p.load(pos)
107 return self.p.index[pos]
107 return self.p.index[pos]
108 def __getitem__(self, pos):
108 def __getitem__(self, pos):
109 return self.p.index[pos] or self.load(pos)
109 return self.p.index[pos] or self.load(pos)
110 def __delitem__(self, pos):
110 def __delitem__(self, pos):
111 del self.p.index[pos]
111 del self.p.index[pos]
112 def append(self, e):
112 def append(self, e):
113 self.p.index.append(e)
113 self.p.index.append(e)
114 def trunc(self, pos):
114 def trunc(self, pos):
115 self.p.trunc(pos)
115 self.p.trunc(pos)
116
116
117 class lazymap(object):
117 class lazymap(object):
118 """a lazy version of the node map"""
118 """a lazy version of the node map"""
119 def __init__(self, parser):
119 def __init__(self, parser):
120 self.p = parser
120 self.p = parser
121 def load(self, key):
121 def load(self, key):
122 if self.p.all: return
122 if self.p.all: return
123 n = self.p.data.find(key)
123 n = self.p.data.find(key)
124 if n < 0:
124 if n < 0:
125 raise KeyError(key)
125 raise KeyError(key)
126 pos = n / self.p.s
126 pos = n / self.p.s
127 self.p.load(pos)
127 self.p.load(pos)
128 def __contains__(self, key):
128 def __contains__(self, key):
129 self.p.load()
129 self.p.load()
130 return key in self.p.map
130 return key in self.p.map
131 def __iter__(self):
131 def __iter__(self):
132 yield nullid
132 yield nullid
133 for i in xrange(self.p.l):
133 for i in xrange(self.p.l):
134 try:
134 try:
135 yield self.p.index[i][6]
135 yield self.p.index[i][6]
136 except:
136 except:
137 self.p.load(i)
137 self.p.load(i)
138 yield self.p.index[i][6]
138 yield self.p.index[i][6]
139 def __getitem__(self, key):
139 def __getitem__(self, key):
140 try:
140 try:
141 return self.p.map[key]
141 return self.p.map[key]
142 except KeyError:
142 except KeyError:
143 try:
143 try:
144 self.load(key)
144 self.load(key)
145 return self.p.map[key]
145 return self.p.map[key]
146 except KeyError:
146 except KeyError:
147 raise KeyError("node " + hex(key))
147 raise KeyError("node " + hex(key))
148 def __setitem__(self, key, val):
148 def __setitem__(self, key, val):
149 self.p.map[key] = val
149 self.p.map[key] = val
150 def __delitem__(self, key):
150 def __delitem__(self, key):
151 del self.p.map[key]
151 del self.p.map[key]
152
152
153 class RevlogError(Exception): pass
153 class RevlogError(Exception): pass
154
154
155 class revlog(object):
155 class revlog(object):
156 """
156 """
157 the underlying revision storage object
157 the underlying revision storage object
158
158
159 A revlog consists of two parts, an index and the revision data.
159 A revlog consists of two parts, an index and the revision data.
160
160
161 The index is a file with a fixed record size containing
161 The index is a file with a fixed record size containing
162 information on each revision, includings its nodeid (hash), the
162 information on each revision, includings its nodeid (hash), the
163 nodeids of its parents, the position and offset of its data within
163 nodeids of its parents, the position and offset of its data within
164 the data file, and the revision it's based on. Finally, each entry
164 the data file, and the revision it's based on. Finally, each entry
165 contains a linkrev entry that can serve as a pointer to external
165 contains a linkrev entry that can serve as a pointer to external
166 data.
166 data.
167
167
168 The revision data itself is a linear collection of data chunks.
168 The revision data itself is a linear collection of data chunks.
169 Each chunk represents a revision and is usually represented as a
169 Each chunk represents a revision and is usually represented as a
170 delta against the previous chunk. To bound lookup time, runs of
170 delta against the previous chunk. To bound lookup time, runs of
171 deltas are limited to about 2 times the length of the original
171 deltas are limited to about 2 times the length of the original
172 version data. This makes retrieval of a version proportional to
172 version data. This makes retrieval of a version proportional to
173 its size, or O(1) relative to the number of revisions.
173 its size, or O(1) relative to the number of revisions.
174
174
175 Both pieces of the revlog are written to in an append-only
175 Both pieces of the revlog are written to in an append-only
176 fashion, which means we never need to rewrite a file to insert or
176 fashion, which means we never need to rewrite a file to insert or
177 remove data, and can use some simple techniques to avoid the need
177 remove data, and can use some simple techniques to avoid the need
178 for locking while reading.
178 for locking while reading.
179 """
179 """
180 def __init__(self, opener, indexfile, datafile):
180 def __init__(self, opener, indexfile, datafile):
181 """
181 """
182 create a revlog object
182 create a revlog object
183
183
184 opener is a function that abstracts the file opening operation
184 opener is a function that abstracts the file opening operation
185 and can be used to implement COW semantics or the like.
185 and can be used to implement COW semantics or the like.
186 """
186 """
187 self.indexfile = indexfile
187 self.indexfile = indexfile
188 self.datafile = datafile
188 self.datafile = datafile
189 self.opener = opener
189 self.opener = opener
190 self.cache = None
190 self.cache = None
191 self.chunkcache = None
191 self.chunkcache = None
192
192
193 try:
193 try:
194 i = self.opener(self.indexfile).read()
194 i = self.opener(self.indexfile).read()
195 except IOError, inst:
195 except IOError, inst:
196 if inst.errno != errno.ENOENT:
196 if inst.errno != errno.ENOENT:
197 raise
197 raise
198 i = ""
198 i = ""
199
199
200 if i and i[:4] != "\0\0\0\0":
200 if i and i[:4] != "\0\0\0\0":
201 raise RevlogError(_("incompatible revlog signature on %s") %
201 raise RevlogError(_("incompatible revlog signature on %s") %
202 self.indexfile)
202 self.indexfile)
203
203
204 if len(i) > 10000:
204 if len(i) > 10000:
205 # big index, let's parse it on demand
205 # big index, let's parse it on demand
206 parser = lazyparser(i, self)
206 parser = lazyparser(i, self)
207 self.index = lazyindex(parser)
207 self.index = lazyindex(parser)
208 self.nodemap = lazymap(parser)
208 self.nodemap = lazymap(parser)
209 else:
209 else:
210 s = struct.calcsize(indexformat)
210 s = struct.calcsize(indexformat)
211 l = len(i) / s
211 l = len(i) / s
212 self.index = [None] * l
212 self.index = [None] * l
213 m = [None] * l
213 m = [None] * l
214
214
215 n = 0
215 n = 0
216 for f in xrange(0, len(i), s):
216 for f in xrange(0, l * s, s):
217 # offset, size, base, linkrev, p1, p2, nodeid
217 # offset, size, base, linkrev, p1, p2, nodeid
218 e = struct.unpack(indexformat, i[f:f + s])
218 e = struct.unpack(indexformat, i[f:f + s])
219 m[n] = (e[6], n)
219 m[n] = (e[6], n)
220 self.index[n] = e
220 self.index[n] = e
221 n += 1
221 n += 1
222
222
223 self.nodemap = dict(m)
223 self.nodemap = dict(m)
224 self.nodemap[nullid] = -1
224 self.nodemap[nullid] = -1
225
225
226 def tip(self): return self.node(len(self.index) - 1)
226 def tip(self): return self.node(len(self.index) - 1)
227 def count(self): return len(self.index)
227 def count(self): return len(self.index)
228 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
228 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
229 def rev(self, node):
229 def rev(self, node):
230 try:
230 try:
231 return self.nodemap[node]
231 return self.nodemap[node]
232 except KeyError:
232 except KeyError:
233 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
233 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
234 def linkrev(self, node): return self.index[self.rev(node)][3]
234 def linkrev(self, node): return self.index[self.rev(node)][3]
235 def parents(self, node):
235 def parents(self, node):
236 if node == nullid: return (nullid, nullid)
236 if node == nullid: return (nullid, nullid)
237 return self.index[self.rev(node)][4:6]
237 return self.index[self.rev(node)][4:6]
238
238
239 def start(self, rev): return self.index[rev][0]
239 def start(self, rev): return self.index[rev][0]
240 def length(self, rev): return self.index[rev][1]
240 def length(self, rev): return self.index[rev][1]
241 def end(self, rev): return self.start(rev) + self.length(rev)
241 def end(self, rev): return self.start(rev) + self.length(rev)
242 def base(self, rev): return self.index[rev][2]
242 def base(self, rev): return self.index[rev][2]
243
243
244 def reachable(self, rev, stop=None):
244 def reachable(self, rev, stop=None):
245 reachable = {}
245 reachable = {}
246 visit = [rev]
246 visit = [rev]
247 reachable[rev] = 1
247 reachable[rev] = 1
248 if stop:
248 if stop:
249 stopn = self.rev(stop)
249 stopn = self.rev(stop)
250 else:
250 else:
251 stopn = 0
251 stopn = 0
252 while visit:
252 while visit:
253 n = visit.pop(0)
253 n = visit.pop(0)
254 if n == stop:
254 if n == stop:
255 continue
255 continue
256 if n == nullid:
256 if n == nullid:
257 continue
257 continue
258 for p in self.parents(n):
258 for p in self.parents(n):
259 if self.rev(p) < stopn:
259 if self.rev(p) < stopn:
260 continue
260 continue
261 if p not in reachable:
261 if p not in reachable:
262 reachable[p] = 1
262 reachable[p] = 1
263 visit.append(p)
263 visit.append(p)
264 return reachable
264 return reachable
265
265
266 def nodesbetween(self, roots=None, heads=None):
266 def nodesbetween(self, roots=None, heads=None):
267 """Return a tuple containing three elements. Elements 1 and 2 contain
267 """Return a tuple containing three elements. Elements 1 and 2 contain
268 a final list bases and heads after all the unreachable ones have been
268 a final list bases and heads after all the unreachable ones have been
269 pruned. Element 0 contains a topologically sorted list of all
269 pruned. Element 0 contains a topologically sorted list of all
270
270
271 nodes that satisfy these constraints:
271 nodes that satisfy these constraints:
272 1. All nodes must be descended from a node in roots (the nodes on
272 1. All nodes must be descended from a node in roots (the nodes on
273 roots are considered descended from themselves).
273 roots are considered descended from themselves).
274 2. All nodes must also be ancestors of a node in heads (the nodes in
274 2. All nodes must also be ancestors of a node in heads (the nodes in
275 heads are considered to be their own ancestors).
275 heads are considered to be their own ancestors).
276
276
277 If roots is unspecified, nullid is assumed as the only root.
277 If roots is unspecified, nullid is assumed as the only root.
278 If heads is unspecified, it is taken to be the output of the
278 If heads is unspecified, it is taken to be the output of the
279 heads method (i.e. a list of all nodes in the repository that
279 heads method (i.e. a list of all nodes in the repository that
280 have no children)."""
280 have no children)."""
281 nonodes = ([], [], [])
281 nonodes = ([], [], [])
282 if roots is not None:
282 if roots is not None:
283 roots = list(roots)
283 roots = list(roots)
284 if not roots:
284 if not roots:
285 return nonodes
285 return nonodes
286 lowestrev = min([self.rev(n) for n in roots])
286 lowestrev = min([self.rev(n) for n in roots])
287 else:
287 else:
288 roots = [nullid] # Everybody's a descendent of nullid
288 roots = [nullid] # Everybody's a descendent of nullid
289 lowestrev = -1
289 lowestrev = -1
290 if (lowestrev == -1) and (heads is None):
290 if (lowestrev == -1) and (heads is None):
291 # We want _all_ the nodes!
291 # We want _all_ the nodes!
292 return ([self.node(r) for r in xrange(0, self.count())],
292 return ([self.node(r) for r in xrange(0, self.count())],
293 [nullid], list(self.heads()))
293 [nullid], list(self.heads()))
294 if heads is None:
294 if heads is None:
295 # All nodes are ancestors, so the latest ancestor is the last
295 # All nodes are ancestors, so the latest ancestor is the last
296 # node.
296 # node.
297 highestrev = self.count() - 1
297 highestrev = self.count() - 1
298 # Set ancestors to None to signal that every node is an ancestor.
298 # Set ancestors to None to signal that every node is an ancestor.
299 ancestors = None
299 ancestors = None
300 # Set heads to an empty dictionary for later discovery of heads
300 # Set heads to an empty dictionary for later discovery of heads
301 heads = {}
301 heads = {}
302 else:
302 else:
303 heads = list(heads)
303 heads = list(heads)
304 if not heads:
304 if not heads:
305 return nonodes
305 return nonodes
306 ancestors = {}
306 ancestors = {}
307 # Start at the top and keep marking parents until we're done.
307 # Start at the top and keep marking parents until we're done.
308 nodestotag = heads[:]
308 nodestotag = heads[:]
309 # Turn heads into a dictionary so we can remove 'fake' heads.
309 # Turn heads into a dictionary so we can remove 'fake' heads.
310 # Also, later we will be using it to filter out the heads we can't
310 # Also, later we will be using it to filter out the heads we can't
311 # find from roots.
311 # find from roots.
312 heads = dict.fromkeys(heads, 0)
312 heads = dict.fromkeys(heads, 0)
313 # Remember where the top was so we can use it as a limit later.
313 # Remember where the top was so we can use it as a limit later.
314 highestrev = max([self.rev(n) for n in nodestotag])
314 highestrev = max([self.rev(n) for n in nodestotag])
315 while nodestotag:
315 while nodestotag:
316 # grab a node to tag
316 # grab a node to tag
317 n = nodestotag.pop()
317 n = nodestotag.pop()
318 # Never tag nullid
318 # Never tag nullid
319 if n == nullid:
319 if n == nullid:
320 continue
320 continue
321 # A node's revision number represents its place in a
321 # A node's revision number represents its place in a
322 # topologically sorted list of nodes.
322 # topologically sorted list of nodes.
323 r = self.rev(n)
323 r = self.rev(n)
324 if r >= lowestrev:
324 if r >= lowestrev:
325 if n not in ancestors:
325 if n not in ancestors:
326 # If we are possibly a descendent of one of the roots
326 # If we are possibly a descendent of one of the roots
327 # and we haven't already been marked as an ancestor
327 # and we haven't already been marked as an ancestor
328 ancestors[n] = 1 # Mark as ancestor
328 ancestors[n] = 1 # Mark as ancestor
329 # Add non-nullid parents to list of nodes to tag.
329 # Add non-nullid parents to list of nodes to tag.
330 nodestotag.extend([p for p in self.parents(n) if
330 nodestotag.extend([p for p in self.parents(n) if
331 p != nullid])
331 p != nullid])
332 elif n in heads: # We've seen it before, is it a fake head?
332 elif n in heads: # We've seen it before, is it a fake head?
333 # So it is, real heads should not be the ancestors of
333 # So it is, real heads should not be the ancestors of
334 # any other heads.
334 # any other heads.
335 heads.pop(n)
335 heads.pop(n)
336 if not ancestors:
336 if not ancestors:
337 return nonodes
337 return nonodes
338 # Now that we have our set of ancestors, we want to remove any
338 # Now that we have our set of ancestors, we want to remove any
339 # roots that are not ancestors.
339 # roots that are not ancestors.
340
340
341 # If one of the roots was nullid, everything is included anyway.
341 # If one of the roots was nullid, everything is included anyway.
342 if lowestrev > -1:
342 if lowestrev > -1:
343 # But, since we weren't, let's recompute the lowest rev to not
343 # But, since we weren't, let's recompute the lowest rev to not
344 # include roots that aren't ancestors.
344 # include roots that aren't ancestors.
345
345
346 # Filter out roots that aren't ancestors of heads
346 # Filter out roots that aren't ancestors of heads
347 roots = [n for n in roots if n in ancestors]
347 roots = [n for n in roots if n in ancestors]
348 # Recompute the lowest revision
348 # Recompute the lowest revision
349 if roots:
349 if roots:
350 lowestrev = min([self.rev(n) for n in roots])
350 lowestrev = min([self.rev(n) for n in roots])
351 else:
351 else:
352 # No more roots? Return empty list
352 # No more roots? Return empty list
353 return nonodes
353 return nonodes
354 else:
354 else:
355 # We are descending from nullid, and don't need to care about
355 # We are descending from nullid, and don't need to care about
356 # any other roots.
356 # any other roots.
357 lowestrev = -1
357 lowestrev = -1
358 roots = [nullid]
358 roots = [nullid]
359 # Transform our roots list into a 'set' (i.e. a dictionary where the
359 # Transform our roots list into a 'set' (i.e. a dictionary where the
360 # values don't matter.
360 # values don't matter.
361 descendents = dict.fromkeys(roots, 1)
361 descendents = dict.fromkeys(roots, 1)
362 # Also, keep the original roots so we can filter out roots that aren't
362 # Also, keep the original roots so we can filter out roots that aren't
363 # 'real' roots (i.e. are descended from other roots).
363 # 'real' roots (i.e. are descended from other roots).
364 roots = descendents.copy()
364 roots = descendents.copy()
365 # Our topologically sorted list of output nodes.
365 # Our topologically sorted list of output nodes.
366 orderedout = []
366 orderedout = []
367 # Don't start at nullid since we don't want nullid in our output list,
367 # Don't start at nullid since we don't want nullid in our output list,
368 # and if nullid shows up in descedents, empty parents will look like
368 # and if nullid shows up in descedents, empty parents will look like
369 # they're descendents.
369 # they're descendents.
370 for r in xrange(max(lowestrev, 0), highestrev + 1):
370 for r in xrange(max(lowestrev, 0), highestrev + 1):
371 n = self.node(r)
371 n = self.node(r)
372 isdescendent = False
372 isdescendent = False
373 if lowestrev == -1: # Everybody is a descendent of nullid
373 if lowestrev == -1: # Everybody is a descendent of nullid
374 isdescendent = True
374 isdescendent = True
375 elif n in descendents:
375 elif n in descendents:
376 # n is already a descendent
376 # n is already a descendent
377 isdescendent = True
377 isdescendent = True
378 # This check only needs to be done here because all the roots
378 # This check only needs to be done here because all the roots
379 # will start being marked is descendents before the loop.
379 # will start being marked is descendents before the loop.
380 if n in roots:
380 if n in roots:
381 # If n was a root, check if it's a 'real' root.
381 # If n was a root, check if it's a 'real' root.
382 p = tuple(self.parents(n))
382 p = tuple(self.parents(n))
383 # If any of its parents are descendents, it's not a root.
383 # If any of its parents are descendents, it's not a root.
384 if (p[0] in descendents) or (p[1] in descendents):
384 if (p[0] in descendents) or (p[1] in descendents):
385 roots.pop(n)
385 roots.pop(n)
386 else:
386 else:
387 p = tuple(self.parents(n))
387 p = tuple(self.parents(n))
388 # A node is a descendent if either of its parents are
388 # A node is a descendent if either of its parents are
389 # descendents. (We seeded the dependents list with the roots
389 # descendents. (We seeded the dependents list with the roots
390 # up there, remember?)
390 # up there, remember?)
391 if (p[0] in descendents) or (p[1] in descendents):
391 if (p[0] in descendents) or (p[1] in descendents):
392 descendents[n] = 1
392 descendents[n] = 1
393 isdescendent = True
393 isdescendent = True
394 if isdescendent and ((ancestors is None) or (n in ancestors)):
394 if isdescendent and ((ancestors is None) or (n in ancestors)):
395 # Only include nodes that are both descendents and ancestors.
395 # Only include nodes that are both descendents and ancestors.
396 orderedout.append(n)
396 orderedout.append(n)
397 if (ancestors is not None) and (n in heads):
397 if (ancestors is not None) and (n in heads):
398 # We're trying to figure out which heads are reachable
398 # We're trying to figure out which heads are reachable
399 # from roots.
399 # from roots.
400 # Mark this head as having been reached
400 # Mark this head as having been reached
401 heads[n] = 1
401 heads[n] = 1
402 elif ancestors is None:
402 elif ancestors is None:
403 # Otherwise, we're trying to discover the heads.
403 # Otherwise, we're trying to discover the heads.
404 # Assume this is a head because if it isn't, the next step
404 # Assume this is a head because if it isn't, the next step
405 # will eventually remove it.
405 # will eventually remove it.
406 heads[n] = 1
406 heads[n] = 1
407 # But, obviously its parents aren't.
407 # But, obviously its parents aren't.
408 for p in self.parents(n):
408 for p in self.parents(n):
409 heads.pop(p, None)
409 heads.pop(p, None)
410 heads = [n for n in heads.iterkeys() if heads[n] != 0]
410 heads = [n for n in heads.iterkeys() if heads[n] != 0]
411 roots = roots.keys()
411 roots = roots.keys()
412 assert orderedout
412 assert orderedout
413 assert roots
413 assert roots
414 assert heads
414 assert heads
415 return (orderedout, roots, heads)
415 return (orderedout, roots, heads)
416
416
417 def heads(self, start=None):
417 def heads(self, start=None):
418 """return the list of all nodes that have no children
418 """return the list of all nodes that have no children
419
419
420 if start is specified, only heads that are descendants of
420 if start is specified, only heads that are descendants of
421 start will be returned
421 start will be returned
422
422
423 """
423 """
424 if start is None:
424 if start is None:
425 start = nullid
425 start = nullid
426 reachable = {start: 1}
426 reachable = {start: 1}
427 heads = {start: 1}
427 heads = {start: 1}
428 startrev = self.rev(start)
428 startrev = self.rev(start)
429
429
430 for r in xrange(startrev + 1, self.count()):
430 for r in xrange(startrev + 1, self.count()):
431 n = self.node(r)
431 n = self.node(r)
432 for pn in self.parents(n):
432 for pn in self.parents(n):
433 if pn in reachable:
433 if pn in reachable:
434 reachable[n] = 1
434 reachable[n] = 1
435 heads[n] = 1
435 heads[n] = 1
436 if pn in heads:
436 if pn in heads:
437 del heads[pn]
437 del heads[pn]
438 return heads.keys()
438 return heads.keys()
439
439
440 def children(self, node):
440 def children(self, node):
441 """find the children of a given node"""
441 """find the children of a given node"""
442 c = []
442 c = []
443 p = self.rev(node)
443 p = self.rev(node)
444 for r in range(p + 1, self.count()):
444 for r in range(p + 1, self.count()):
445 n = self.node(r)
445 n = self.node(r)
446 for pn in self.parents(n):
446 for pn in self.parents(n):
447 if pn == node:
447 if pn == node:
448 c.append(n)
448 c.append(n)
449 continue
449 continue
450 elif pn == nullid:
450 elif pn == nullid:
451 continue
451 continue
452 return c
452 return c
453
453
454 def lookup(self, id):
454 def lookup(self, id):
455 """locate a node based on revision number or subset of hex nodeid"""
455 """locate a node based on revision number or subset of hex nodeid"""
456 try:
456 try:
457 rev = int(id)
457 rev = int(id)
458 if str(rev) != id: raise ValueError
458 if str(rev) != id: raise ValueError
459 if rev < 0: rev = self.count() + rev
459 if rev < 0: rev = self.count() + rev
460 if rev < 0 or rev >= self.count(): raise ValueError
460 if rev < 0 or rev >= self.count(): raise ValueError
461 return self.node(rev)
461 return self.node(rev)
462 except (ValueError, OverflowError):
462 except (ValueError, OverflowError):
463 c = []
463 c = []
464 for n in self.nodemap:
464 for n in self.nodemap:
465 if hex(n).startswith(id):
465 if hex(n).startswith(id):
466 c.append(n)
466 c.append(n)
467 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
467 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
468 if len(c) < 1: raise RevlogError(_("No match found"))
468 if len(c) < 1: raise RevlogError(_("No match found"))
469 return c[0]
469 return c[0]
470
470
471 return None
471 return None
472
472
473 def diff(self, a, b):
473 def diff(self, a, b):
474 """return a delta between two revisions"""
474 """return a delta between two revisions"""
475 return mdiff.textdiff(a, b)
475 return mdiff.textdiff(a, b)
476
476
477 def patches(self, t, pl):
477 def patches(self, t, pl):
478 """apply a list of patches to a string"""
478 """apply a list of patches to a string"""
479 return mdiff.patches(t, pl)
479 return mdiff.patches(t, pl)
480
480
    def chunk(self, rev):
        """Return the decompressed data stored for revision rev.

        Serves reads out of self.chunkcache, a (start-offset, data)
        window over the data file that is at least 4MB long, so
        sequential chunk reads mostly avoid reopening the file.
        """
        start, length = self.start(rev), self.length(rev)
        end = start + length

        def loadcache():
            # read a generous window beginning at this chunk so that
            # subsequent nearby chunks are served from memory
            cache_length = max(4096 * 1024, length) # 4Mo
            df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache()

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            # requested chunk falls outside the cached window: reload
            # the cache anchored at this chunk's start
            loadcache()
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])
509
509
510 def delta(self, node):
510 def delta(self, node):
511 """return or calculate a delta between a node and its predecessor"""
511 """return or calculate a delta between a node and its predecessor"""
512 r = self.rev(node)
512 r = self.rev(node)
513 b = self.base(r)
513 b = self.base(r)
514 if r == b:
514 if r == b:
515 return self.diff(self.revision(self.node(r - 1)),
515 return self.diff(self.revision(self.node(r - 1)),
516 self.revision(node))
516 self.revision(node))
517 else:
517 else:
518 return self.chunk(r)
518 return self.chunk(r)
519
519
    def revision(self, node):
        """return an uncompressed revision of a given node

        Rebuilds the text by applying the stored delta chain on top of
        the chain's base text, verifies it against the node hash, and
        caches the result in self.cache as (node, rev, text).
        """
        if node == nullid: return ""
        # fast path: the exact text is already cached
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            # the cached text lies on this delta chain before rev, so
            # start patching from it instead of the chain base
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.chunk(base)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r))

        text = mdiff.patches(text, bins)

        # integrity check: the node id is the hash of text and parents
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
550
550
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta

        Returns the node id of the (possibly pre-existing) revision.
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        # revlogs are content-addressed: an identical revision is
        # already stored, nothing to do
        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                # no precomputed delta supplied: diff against the tip
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            # dist = total on-disk size of the delta chain if we append
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        # index entry layout: (data offset, compressed length,
        # delta-chain base rev, linkrev, parent1, parent2, node)
        e = (offset, l, base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        # register expected file sizes with the transaction before
        # appending, so a rollback can truncate back to them
        transaction.add(self.datafile, e[0])
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
612
612
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root; parents have
        # lower revision numbers, so a single forward pass works
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    # yield (distance, node) with the sign restored
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            # batch the ancestors stream into (distance, {nodes}) groups
            # of equal distance from root
            sg, s = None, {}
            for g,n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n:1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [ n for n in gx[1] if n in gy[1] ]
                if i:
                    # any common node at the greatest distance is a
                    # least common ancestor
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()
672
672
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]

        Yields length-prefixed chunks (">l" framing); a zero length
        terminates the group.  lookup maps a node to its linkrev node;
        infocollect, if given, is called with each node emitted.
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [self.chunk(r) for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            na = self.node(a)
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            # do we need to construct a new delta?
            # (only the stored chunk for b is reusable, and only when b
            # directly follows a and is not a delta-chain base)
            if a + 1 != b or self.base(b) == b:
                ta = self.revision(na)
                tb = self.revision(nb)
                d = self.diff(ta, tb)
            else:
                d = self.chunk(b)

            # frame: length, 80 bytes of metadata (node, p1, p2,
            # linkrev node), then the delta itself
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        # zero-length chunk marks the end of the group
        yield struct.pack(">l", 0)
723
723
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        revs yields chunks as produced by group(): 80 bytes of
        metadata (node, p1, p2, changeset node) followed by the delta.
        linkmapper maps a changeset node to a linkrev.  Returns the
        last node added.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        # register current file sizes so a failed pull can roll back
        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                # rebuild the full text and store it via addrevision,
                # which starts a fresh delta chain when needed
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                # fast path: append the received delta directly
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            # advance bookkeeping to the revision just added
            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
809
809
    def strip(self, rev, minlink):
        """remove revision rev and everything after it from the revlog

        minlink is the oldest linkrev we are allowed to strip; rev is
        advanced past any revision linked to an older changeset.
        Truncates both the data and index files and resets in-memory
        state accordingly.
        """
        if self.count() == 0 or rev >= self.count():
            return

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][3]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        self.opener(self.datafile, "a").truncate(end)
        end = rev * struct.calcsize(indexformat)
        self.opener(self.indexfile, "a").truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        for p in self.index[rev:]:
            # p[6] is the node field of the index entry
            del self.nodemap[p[6]]
        del self.index[rev:]

        # truncating the lazyindex also truncates the lazymap.
        if isinstance(self.index, lazyindex):
            self.index.trunc(end)
838
838
839
839
    def checksize(self):
        """check the data and index files for size mismatches

        Returns a (dd, di) pair: dd is the data file's actual size
        minus the size implied by the last index entry, and di is the
        number of trailing bytes in the index file that do not form a
        whole index entry.  A missing file (ENOENT) counts as zero
        excess; any other IOError propagates.
        """
        expected = 0
        if self.count():
            # end of the last indexed chunk == expected data file size
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(indexformat)
            # di is the remainder left after whole index entries
            i = actual / s
            di = actual - (i * s)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
853
868
854
869
General Comments 0
You need to be logged in to leave comments. Login now