Fix some problems when working on broken repositories:...
Thomas Arendsen Hein
r1853:5ac811b7 default
@@ -1,1899 +1,1907 @@ localrepo.py
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import struct, os, util
import filelog, manifest, changelog, dirstate, repo
from node import *
from i18n import gettext as _
from demandload import *
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")

class localrepository(object):
    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            pass

    def hook(self, name, throw=False, **args):
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            old = {}
            for k, v in args.items():
                k = k.upper()
                old['HG_' + k] = os.environ.get(k, None)
                old[k] = os.environ.get(k, None)
                os.environ['HG_' + k] = str(v)
                os.environ[k] = str(v)

            try:
                # Hooks run in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                r = os.system(cmd)
            finally:
                for k, v in old.items():
                    if v is not None:
                        os.environ[k] = v
                    else:
                        del os.environ[k]

                os.chdir(olddir)

            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            r = runhook(hname, cmd) and r
        return r
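
    # [editorial sketch, not part of this changeset] runhook() above exports
    # every keyword argument of hook() into the environment twice, once bare
    # and once with an HG_ prefix, then runs the command from self.root.
    # A minimal .hg/hgrc entry this mechanism would pick up (the hook name
    # and variables shown are illustrative):
    #
    #   [hooks]
    #   commit = echo "committed $HG_NODE (parent $HG_PARENT1)"
    #
    # Entries with a dotted suffix ("commit.notify") also fire, because only
    # the part before the first "." is compared against the hook name.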

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass

            try:
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def lookup(self, key):
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.stat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
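
    # [editorial sketch, not part of this changeset] wread()/wwrite() pipe
    # working-directory data through user-configured filters via
    # util.filter(). A hypothetical hgrc illustrating the [encode]/[decode]
    # sections read above (glob pattern = shell command on stdin/stdout):
    #
    #   [encode]
    #   *.txt = tr -d '\r'     # normalize line endings when storing
    #   [decode]
    #   *.txt = sed 's/$/\r/'  # restore CRLF when writing out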

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def undo(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
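
    # [editorial note, not part of this changeset] the transaction/undo
    # files used above all live under .hg/:
    #   journal, journal.dirstate - write-ahead log and dirstate snapshot
    #                               for the running transaction (recover()
    #                               rolls these back)
    #   undo, undo.dirstate       - copies kept after a transaction closes,
    #                               consumed by undo(); they are produced by
    #                               the aftertrans() callback passed in
    #                               transaction(), defined outside this hunk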
    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock("lock", wait, acquirefn=self.reload)

    def wlock(self, wait=1):
        return self.do_lock("wlock", wait,
                            self.dirstate.write,
                            self.wreload)

    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
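
    # [editorial sketch, not part of this changeset] a hypothetical driver
    # for the add/commit API above; the path and file names are
    # illustrative only:
    #
    #   u = ui.ui()
    #   repo = localrepository(u, path='/path/to/repo')
    #   repo.add(['newfile'])
    #   repo.commit(files=['newfile'], text='add newfile', user='someone')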

    def walk(self, node=None, files=[], match=util.always):
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn

    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
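
    # [editorial note, not part of this changeset] between() samples each
    # top->bottom chain at exponentially growing offsets (1, 2, 4, 8, ...),
    # which is what lets findincoming() below narrow an unknown branch with
    # O(log n) remote.between() round trips. For a linear chain
    # n9 -> n8 -> ... -> n0 with top=n9 and bottom=n0, it returns
    # [[n8, n7, n5, n1]].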
807
807
808 def findincoming(self, remote, base=None, heads=None):
808 def findincoming(self, remote, base=None, heads=None):
809 m = self.changelog.nodemap
809 m = self.changelog.nodemap
810 search = []
810 search = []
811 fetch = {}
811 fetch = {}
812 seen = {}
812 seen = {}
813 seenbranch = {}
813 seenbranch = {}
814 if base == None:
814 if base == None:
815 base = {}
815 base = {}
816
816
817 # assume we're closer to the tip than the root
817 # assume we're closer to the tip than the root
818 # and start by examining the heads
818 # and start by examining the heads
819 self.ui.status(_("searching for changes\n"))
819 self.ui.status(_("searching for changes\n"))
820
820
821 if not heads:
821 if not heads:
822 heads = remote.heads()
822 heads = remote.heads()
823
823
824 unknown = []
824 unknown = []
825 for h in heads:
825 for h in heads:
826 if h not in m:
826 if h not in m:
827 unknown.append(h)
827 unknown.append(h)
828 else:
828 else:
829 base[h] = 1
829 base[h] = 1
830
830
831 if not unknown:
831 if not unknown:
832 return None
832 return None
833
833
834 rep = {}
834 rep = {}
835 reqcnt = 0
835 reqcnt = 0
836
836
837 # search through remote branches
837 # search through remote branches
838 # a 'branch' here is a linear segment of history, with four parts:
838 # a 'branch' here is a linear segment of history, with four parts:
839 # head, root, first parent, second parent
839 # head, root, first parent, second parent
840 # (a branch always has two parents (or none) by definition)
840 # (a branch always has two parents (or none) by definition)
841 unknown = remote.branches(unknown)
841 unknown = remote.branches(unknown)
842 while unknown:
842 while unknown:
843 r = []
843 r = []
844 while unknown:
844 while unknown:
845 n = unknown.pop(0)
845 n = unknown.pop(0)
846 if n[0] in seen:
846 if n[0] in seen:
847 continue
847 continue
848
848
849 self.ui.debug(_("examining %s:%s\n")
849 self.ui.debug(_("examining %s:%s\n")
850 % (short(n[0]), short(n[1])))
850 % (short(n[0]), short(n[1])))
851 if n[0] == nullid:
851 if n[0] == nullid:
852 break
852 break
853 if n in seenbranch:
853 if n in seenbranch:
854 self.ui.debug(_("branch already found\n"))
854 self.ui.debug(_("branch already found\n"))
855 continue
855 continue
856 if n[1] and n[1] in m: # do we know the base?
856 if n[1] and n[1] in m: # do we know the base?
857 self.ui.debug(_("found incomplete branch %s:%s\n")
857 self.ui.debug(_("found incomplete branch %s:%s\n")
858 % (short(n[0]), short(n[1])))
858 % (short(n[0]), short(n[1])))
859 search.append(n) # schedule branch range for scanning
859 search.append(n) # schedule branch range for scanning
860 seenbranch[n] = 1
860 seenbranch[n] = 1
861 else:
861 else:
862 if n[1] not in seen and n[1] not in fetch:
862 if n[1] not in seen and n[1] not in fetch:
863 if n[2] in m and n[3] in m:
863 if n[2] in m and n[3] in m:
864 self.ui.debug(_("found new changeset %s\n") %
864 self.ui.debug(_("found new changeset %s\n") %
865 short(n[1]))
865 short(n[1]))
866 fetch[n[1]] = 1 # earliest unknown
866 fetch[n[1]] = 1 # earliest unknown
867 base[n[2]] = 1 # latest known
867 base[n[2]] = 1 # latest known
868 continue
868 continue
869
869
870 for a in n[2:4]:
870 for a in n[2:4]:
871 if a not in rep:
871 if a not in rep:
872 r.append(a)
872 r.append(a)
873 rep[a] = 1
873 rep[a] = 1
874
874
875 seen[n[0]] = 1
875 seen[n[0]] = 1
876
876
877 if r:
877 if r:
878 reqcnt += 1
878 reqcnt += 1
879 self.ui.debug(_("request %d: %s\n") %
879 self.ui.debug(_("request %d: %s\n") %
880 (reqcnt, " ".join(map(short, r))))
880 (reqcnt, " ".join(map(short, r))))
881 for p in range(0, len(r), 10):
881 for p in range(0, len(r), 10):
882 for b in remote.branches(r[p:p+10]):
882 for b in remote.branches(r[p:p+10]):
883 self.ui.debug(_("received %s:%s\n") %
883 self.ui.debug(_("received %s:%s\n") %
884 (short(b[0]), short(b[1])))
884 (short(b[0]), short(b[1])))
885 if b[0] in m:
885 if b[0] in m:
886 self.ui.debug(_("found base node %s\n")
886 self.ui.debug(_("found base node %s\n")
887 % short(b[0]))
887 % short(b[0]))
888 base[b[0]] = 1
888 base[b[0]] = 1
889 elif b[0] not in seen:
889 elif b[0] not in seen:
890 unknown.append(b)
890 unknown.append(b)
891
891
892 # do binary search on the branches we found
892 # do binary search on the branches we found
893 while search:
893 while search:
894 n = search.pop(0)
894 n = search.pop(0)
895 reqcnt += 1
895 reqcnt += 1
896 l = remote.between([(n[0], n[1])])[0]
896 l = remote.between([(n[0], n[1])])[0]
897 l.append(n[1])
897 l.append(n[1])
898 p = n[0]
898 p = n[0]
899 f = 1
899 f = 1
900 for i in l:
900 for i in l:
901 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
901 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
902 if i in m:
902 if i in m:
903 if f <= 2:
903 if f <= 2:
904 self.ui.debug(_("found new branch changeset %s\n") %
904 self.ui.debug(_("found new branch changeset %s\n") %
905 short(p))
905 short(p))
906 fetch[p] = 1
906 fetch[p] = 1
907 base[i] = 1
907 base[i] = 1
908 else:
908 else:
909 self.ui.debug(_("narrowed branch search to %s:%s\n")
909 self.ui.debug(_("narrowed branch search to %s:%s\n")
910 % (short(p), short(i)))
910 % (short(p), short(i)))
911 search.append((p, i))
911 search.append((p, i))
912 break
912 break
913 p, f = i, f * 2
913 p, f = i, f * 2
914
914
915 # sanity check our fetch list
915 # sanity check our fetch list
916 for f in fetch.keys():
916 for f in fetch.keys():
917 if f in m:
917 if f in m:
918 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
918 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
919
919
920 if base.keys() == [nullid]:
920 if base.keys() == [nullid]:
921 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
921 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
922
922
923 self.ui.note(_("found new changesets starting at ") +
923 self.ui.note(_("found new changesets starting at ") +
924 " ".join([short(f) for f in fetch]) + "\n")
924 " ".join([short(f) for f in fetch]) + "\n")
925
925
926 self.ui.debug(_("%d total queries\n") % reqcnt)
926 self.ui.debug(_("%d total queries\n") % reqcnt)
927
927
928 return fetch.keys()
928 return fetch.keys()
929
929
930 def findoutgoing(self, remote, base=None, heads=None):
930 def findoutgoing(self, remote, base=None, heads=None):
931 if base == None:
931 if base == None:
932 base = {}
932 base = {}
933 self.findincoming(remote, base, heads)
933 self.findincoming(remote, base, heads)
934
934
935 self.ui.debug(_("common changesets up to ")
935 self.ui.debug(_("common changesets up to ")
936 + " ".join(map(short, base.keys())) + "\n")
936 + " ".join(map(short, base.keys())) + "\n")
937
937
938 remain = dict.fromkeys(self.changelog.nodemap)
938 remain = dict.fromkeys(self.changelog.nodemap)
939
939
940 # prune everything remote has from the tree
940 # prune everything remote has from the tree
941 del remain[nullid]
941 del remain[nullid]
942 remove = base.keys()
942 remove = base.keys()
943 while remove:
943 while remove:
944 n = remove.pop(0)
944 n = remove.pop(0)
945 if n in remain:
945 if n in remain:
946 del remain[n]
946 del remain[n]
947 for p in self.changelog.parents(n):
947 for p in self.changelog.parents(n):
948 remove.append(p)
948 remove.append(p)
949
949
950 # find every node whose parents have been pruned
950 # find every node whose parents have been pruned
951 subset = []
951 subset = []
952 for n in remain:
952 for n in remain:
953 p1, p2 = self.changelog.parents(n)
953 p1, p2 = self.changelog.parents(n)
954 if p1 not in remain and p2 not in remain:
954 if p1 not in remain and p2 not in remain:
955 subset.append(n)
955 subset.append(n)
956
956
957 # this is the set of all roots we have to push
957 # this is the set of all roots we have to push
958 return subset
958 return subset
959
959
960 def pull(self, remote, heads=None):
960 def pull(self, remote, heads=None):
961 l = self.lock()
961 l = self.lock()
962
962
963 # if we have an empty repo, fetch everything
963 # if we have an empty repo, fetch everything
964 if self.changelog.tip() == nullid:
964 if self.changelog.tip() == nullid:
965 self.ui.status(_("requesting all changes\n"))
965 self.ui.status(_("requesting all changes\n"))
966 fetch = [nullid]
966 fetch = [nullid]
967 else:
967 else:
968 fetch = self.findincoming(remote)
968 fetch = self.findincoming(remote)
969
969
970 if not fetch:
970 if not fetch:
971 self.ui.status(_("no changes found\n"))
971 self.ui.status(_("no changes found\n"))
972 return 1
972 return 1
973
973
974 if heads is None:
974 if heads is None:
975 cg = remote.changegroup(fetch, 'pull')
975 cg = remote.changegroup(fetch, 'pull')
976 else:
976 else:
977 cg = remote.changegroupsubset(fetch, heads, 'pull')
977 cg = remote.changegroupsubset(fetch, heads, 'pull')
978 return self.addchangegroup(cg)
978 return self.addchangegroup(cg)
979
979
980 def push(self, remote, force=False, revs=None):
980 def push(self, remote, force=False, revs=None):
981 lock = remote.lock()
981 lock = remote.lock()
982
982
983 base = {}
983 base = {}
984 heads = remote.heads()
984 heads = remote.heads()
985 inc = self.findincoming(remote, base, heads)
985 inc = self.findincoming(remote, base, heads)
986 if not force and inc:
986 if not force and inc:
987 self.ui.warn(_("abort: unsynced remote changes!\n"))
987 self.ui.warn(_("abort: unsynced remote changes!\n"))
988 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
988 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
989 return 1
989 return 1
990
990
991 update = self.findoutgoing(remote, base)
991 update = self.findoutgoing(remote, base)
992 if revs is not None:
992 if revs is not None:
993 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
993 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
994 else:
994 else:
995 bases, heads = update, self.changelog.heads()
995 bases, heads = update, self.changelog.heads()
996
996
997 if not bases:
997 if not bases:
998 self.ui.status(_("no changes found\n"))
998 self.ui.status(_("no changes found\n"))
999 return 1
999 return 1
1000 elif not force:
1000 elif not force:
1001 if len(bases) < len(heads):
1001 if len(bases) < len(heads):
1002 self.ui.warn(_("abort: push creates new remote branches!\n"))
1002 self.ui.warn(_("abort: push creates new remote branches!\n"))
1003 self.ui.status(_("(did you forget to merge?"
1003 self.ui.status(_("(did you forget to merge?"
1004 " use push -f to force)\n"))
1004 " use push -f to force)\n"))
1005 return 1
1005 return 1
1006
1006
1007 if revs is None:
1007 if revs is None:
1008 cg = self.changegroup(update, 'push')
1008 cg = self.changegroup(update, 'push')
1009 else:
1009 else:
1010 cg = self.changegroupsubset(update, revs, 'push')
1010 cg = self.changegroupsubset(update, revs, 'push')
1011 return remote.addchangegroup(cg)
1011 return remote.addchangegroup(cg)
1012
1012
1013 def changegroupsubset(self, bases, heads, source):
1013 def changegroupsubset(self, bases, heads, source):
1014 """This function generates a changegroup consisting of all the nodes
1014 """This function generates a changegroup consisting of all the nodes
1015 that are descendents of any of the bases, and ancestors of any of
1015 that are descendents of any of the bases, and ancestors of any of
1016 the heads.
1016 the heads.
1017
1017
1018 It is fairly complex as determining which filenodes and which
1018 It is fairly complex as determining which filenodes and which
1019 manifest nodes need to be included for the changeset to be complete
1019 manifest nodes need to be included for the changeset to be complete
1020 is non-trivial.
1020 is non-trivial.
1021
1021
1022 Another wrinkle is doing the reverse, figuring out which changeset in
1022 Another wrinkle is doing the reverse, figuring out which changeset in
1023 the changegroup a particular filenode or manifestnode belongs to."""
1023 the changegroup a particular filenode or manifestnode belongs to."""
1024
1024
1025 self.hook('preoutgoing', throw=True, source=source)
1025 self.hook('preoutgoing', throw=True, source=source)
1026
1026
1027 # Set up some initial variables
1027 # Set up some initial variables
1028 # Make it easy to refer to self.changelog
1028 # Make it easy to refer to self.changelog
1029 cl = self.changelog
1029 cl = self.changelog
1030 # msng is short for missing - compute the list of changesets in this
1030 # msng is short for missing - compute the list of changesets in this
1031 # changegroup.
1031 # changegroup.
1032 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1032 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1033 # Some bases may turn out to be superfluous, and some heads may be
1033 # Some bases may turn out to be superfluous, and some heads may be
1034 # too. nodesbetween will return the minimal set of bases and heads
1034 # too. nodesbetween will return the minimal set of bases and heads
1035 # necessary to re-create the changegroup.
1035 # necessary to re-create the changegroup.
1036
1036
1037 # Known heads are the list of heads that it is assumed the recipient
1037 # Known heads are the list of heads that it is assumed the recipient
1038 # of this changegroup will know about.
1038 # of this changegroup will know about.
1039 knownheads = {}
1039 knownheads = {}
1040 # We assume that all parents of bases are known heads.
1040 # We assume that all parents of bases are known heads.
1041 for n in bases:
1041 for n in bases:
1042 for p in cl.parents(n):
1042 for p in cl.parents(n):
1043 if p != nullid:
1043 if p != nullid:
1044 knownheads[p] = 1
1044 knownheads[p] = 1
1045 knownheads = knownheads.keys()
1045 knownheads = knownheads.keys()
1046 if knownheads:
1046 if knownheads:
1047 # Now that we know what heads are known, we can compute which
1047 # Now that we know what heads are known, we can compute which
1048 # changesets are known. The recipient must know about all
1048 # changesets are known. The recipient must know about all
1049 # changesets required to reach the known heads from the null
1049 # changesets required to reach the known heads from the null
1050 # changeset.
1050 # changeset.
1051 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1051 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1052 junk = None
1052 junk = None
1053 # Transform the list into an ersatz set.
1053 # Transform the list into an ersatz set.
1054 has_cl_set = dict.fromkeys(has_cl_set)
1054 has_cl_set = dict.fromkeys(has_cl_set)
1055 else:
1055 else:
1056 # If there were no known heads, the recipient cannot be assumed to
1056 # If there were no known heads, the recipient cannot be assumed to
1057 # know about any changesets.
1057 # know about any changesets.
1058 has_cl_set = {}
1058 has_cl_set = {}
1059
1059
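# Illustration (toy DAG, not part of the function): with parents
#   {0: [], 1: [0], 2: [1], 3: [2]}
# and knownheads == [node(1)], has_cl_set ends up holding {0, 1},
# i.e. every changeset needed to reach the known heads from null.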
1060 # Make it easy to refer to self.manifest
1060 # Make it easy to refer to self.manifest
1061 mnfst = self.manifest
1061 mnfst = self.manifest
1062 # We don't know which manifests are missing yet
1062 # We don't know which manifests are missing yet
1063 msng_mnfst_set = {}
1063 msng_mnfst_set = {}
1064 # Nor do we know which filenodes are missing.
1064 # Nor do we know which filenodes are missing.
1065 msng_filenode_set = {}
1065 msng_filenode_set = {}
1066
1066
1067 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1067 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1068 junk = None
1068 junk = None
1069
1069
1070 # A changeset always belongs to itself, so the changenode lookup
1070 # A changeset always belongs to itself, so the changenode lookup
1071 # function for a changenode is identity.
1071 # function for a changenode is identity.
1072 def identity(x):
1072 def identity(x):
1073 return x
1073 return x
1074
1074
1075 # A function generating function. Sets up an environment for the
1075 # A function generating function. Sets up an environment for the
1076 # inner function.
1076 # inner function.
1077 def cmp_by_rev_func(revlog):
1077 def cmp_by_rev_func(revlog):
1078 # Compare two nodes by their revision number in the environment's
1078 # Compare two nodes by their revision number in the environment's
1079 # revision history. Since the revision number both represents the
1079 # revision history. Since the revision number both represents the
1080 # most efficient order to read the nodes in, and represents a
1080 # most efficient order to read the nodes in, and represents a
1081 # topological sorting of the nodes, this function is often useful.
1081 # topological sorting of the nodes, this function is often useful.
1082 def cmp_by_rev(a, b):
1082 def cmp_by_rev(a, b):
1083 return cmp(revlog.rev(a), revlog.rev(b))
1083 return cmp(revlog.rev(a), revlog.rev(b))
1084 return cmp_by_rev
1084 return cmp_by_rev
1085
1085
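# Usage sketch (illustrative): the closure returned here is a plain
# sort comparator, e.g.
#   some_nodes.sort(cmp_by_rev_func(cl))
# which is exactly how the manifest and filenode lists are ordered
# later in gengroup().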
1086 # If we determine that a particular file or manifest node must be a
1086 # If we determine that a particular file or manifest node must be a
1087 # node that the recipient of the changegroup will already have, we can
1087 # node that the recipient of the changegroup will already have, we can
1088 # also assume the recipient will have all the parents. This function
1088 # also assume the recipient will have all the parents. This function
1089 # prunes them from the set of missing nodes.
1089 # prunes them from the set of missing nodes.
1090 def prune_parents(revlog, hasset, msngset):
1090 def prune_parents(revlog, hasset, msngset):
1091 haslst = hasset.keys()
1091 haslst = hasset.keys()
1092 haslst.sort(cmp_by_rev_func(revlog))
1092 haslst.sort(cmp_by_rev_func(revlog))
1093 for node in haslst:
1093 for node in haslst:
1094 parentlst = [p for p in revlog.parents(node) if p != nullid]
1094 parentlst = [p for p in revlog.parents(node) if p != nullid]
1095 while parentlst:
1095 while parentlst:
1096 n = parentlst.pop()
1096 n = parentlst.pop()
1097 if n not in hasset:
1097 if n not in hasset:
1098 hasset[n] = 1
1098 hasset[n] = 1
1099 p = [p for p in revlog.parents(n) if p != nullid]
1099 p = [p for p in revlog.parents(n) if p != nullid]
1100 parentlst.extend(p)
1100 parentlst.extend(p)
1101 for n in hasset:
1101 for n in hasset:
1102 msngset.pop(n, None)
1102 msngset.pop(n, None)
1103
1103
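# Worked example (hypothetical): if the recipient is known to have
# filenode C in a chain A -> B -> C, prune_parents walks C's parent
# chain, adds A and B to hasset, and pops all three from the missing
# set; ancestors of a known node are necessarily known too.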
1104 # This is a function generating function used to set up an environment
1104 # This is a function generating function used to set up an environment
1105 # for the inner function to execute in.
1105 # for the inner function to execute in.
1106 def manifest_and_file_collector(changedfileset):
1106 def manifest_and_file_collector(changedfileset):
1107 # This is an information gathering function that gathers
1107 # This is an information gathering function that gathers
1108 # information from each changeset node that goes out as part of
1108 # information from each changeset node that goes out as part of
1109 # the changegroup. The information gathered is a list of which
1109 # the changegroup. The information gathered is a list of which
1110 # manifest nodes are potentially required (the recipient may
1110 # manifest nodes are potentially required (the recipient may
1111 # already have them) and a total list of all files which were
1111 # already have them) and a total list of all files which were
1112 # changed in any changeset in the changegroup.
1112 # changed in any changeset in the changegroup.
1113 #
1113 #
1114 # We also remember the first changenode each manifest was
1114 # We also remember the first changenode each manifest was
1115 # referenced by, so we can later determine which changenode 'owns'
1115 # referenced by, so we can later determine which changenode 'owns'
1116 # the manifest.
1116 # the manifest.
1117 def collect_manifests_and_files(clnode):
1117 def collect_manifests_and_files(clnode):
1118 c = cl.read(clnode)
1118 c = cl.read(clnode)
1119 for f in c[3]:
1119 for f in c[3]:
1120 # This is to make sure we only have one instance of each
1120 # This is to make sure we only have one instance of each
1121 # filename string for each filename.
1121 # filename string for each filename.
1122 changedfileset.setdefault(f, f)
1122 changedfileset.setdefault(f, f)
1123 msng_mnfst_set.setdefault(c[0], clnode)
1123 msng_mnfst_set.setdefault(c[0], clnode)
1124 return collect_manifests_and_files
1124 return collect_manifests_and_files
1125
1125
1126 # Figure out which manifest nodes (of the ones we think might be part
1126 # Figure out which manifest nodes (of the ones we think might be part
1127 # of the changegroup) the recipient must know about and remove them
1127 # of the changegroup) the recipient must know about and remove them
1128 # from the changegroup.
1128 # from the changegroup.
1129 def prune_manifests():
1129 def prune_manifests():
1130 has_mnfst_set = {}
1130 has_mnfst_set = {}
1131 for n in msng_mnfst_set:
1131 for n in msng_mnfst_set:
1132 # If a 'missing' manifest thinks it belongs to a changenode
1132 # If a 'missing' manifest thinks it belongs to a changenode
1133 # the recipient is assumed to have, obviously the recipient
1133 # the recipient is assumed to have, obviously the recipient
1134 # must have that manifest.
1134 # must have that manifest.
1135 linknode = cl.node(mnfst.linkrev(n))
1135 linknode = cl.node(mnfst.linkrev(n))
1136 if linknode in has_cl_set:
1136 if linknode in has_cl_set:
1137 has_mnfst_set[n] = 1
1137 has_mnfst_set[n] = 1
1138 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1138 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1139
1139
1140 # Use the information collected in collect_manifests_and_files to say
1140 # Use the information collected in collect_manifests_and_files to say
1141 # which changenode any manifestnode belongs to.
1141 # which changenode any manifestnode belongs to.
1142 def lookup_manifest_link(mnfstnode):
1142 def lookup_manifest_link(mnfstnode):
1143 return msng_mnfst_set[mnfstnode]
1143 return msng_mnfst_set[mnfstnode]
1144
1144
1145 # A function generating function that sets up the initial environment
1145 # A function generating function that sets up the initial environment
1146 # for the inner function.
1146 # for the inner function.
1147 def filenode_collector(changedfiles):
1147 def filenode_collector(changedfiles):
1148 next_rev = [0]
1148 next_rev = [0]
1149 # This gathers information from each manifestnode included in the
1149 # This gathers information from each manifestnode included in the
1150 # changegroup about which filenodes the manifest node references
1150 # changegroup about which filenodes the manifest node references
1151 # so we can include those in the changegroup too.
1151 # so we can include those in the changegroup too.
1152 #
1152 #
1153 # It also remembers which changenode each filenode belongs to. It
1153 # It also remembers which changenode each filenode belongs to. It
1154 # does this by assuming that a filenode belongs to the changenode
1154 # does this by assuming that a filenode belongs to the changenode
1155 # the first manifest that references it belongs to.
1155 # the first manifest that references it belongs to.
1156 def collect_msng_filenodes(mnfstnode):
1156 def collect_msng_filenodes(mnfstnode):
1157 r = mnfst.rev(mnfstnode)
1157 r = mnfst.rev(mnfstnode)
1158 if r == next_rev[0]:
1158 if r == next_rev[0]:
1159 # If this rev immediately follows the last one we looked at,
1159 # If this rev immediately follows the last one we looked at,
1160 # we only need to look at its delta.
1160 # we only need to look at its delta.
1161 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1161 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1162 # For each line in the delta
1162 # For each line in the delta
1163 for dline in delta.splitlines():
1163 for dline in delta.splitlines():
1164 # get the filename and filenode for that line
1164 # get the filename and filenode for that line
1165 f, fnode = dline.split('\0')
1165 f, fnode = dline.split('\0')
1166 fnode = bin(fnode[:40])
1166 fnode = bin(fnode[:40])
1167 f = changedfiles.get(f, None)
1167 f = changedfiles.get(f, None)
1168 # And if the file is in the list of files we care
1168 # And if the file is in the list of files we care
1169 # about.
1169 # about.
1170 if f is not None:
1170 if f is not None:
1171 # Get the changenode this manifest belongs to
1171 # Get the changenode this manifest belongs to
1172 clnode = msng_mnfst_set[mnfstnode]
1172 clnode = msng_mnfst_set[mnfstnode]
1173 # Create the set of filenodes for the file if
1173 # Create the set of filenodes for the file if
1174 # there isn't one already.
1174 # there isn't one already.
1175 ndset = msng_filenode_set.setdefault(f, {})
1175 ndset = msng_filenode_set.setdefault(f, {})
1176 # And set the filenode's changelog node to the
1176 # And set the filenode's changelog node to the
1177 # manifest's if it hasn't been set already.
1177 # manifest's if it hasn't been set already.
1178 ndset.setdefault(fnode, clnode)
1178 ndset.setdefault(fnode, clnode)
1179 else:
1179 else:
1180 # Otherwise we need a full manifest.
1180 # Otherwise we need a full manifest.
1181 m = mnfst.read(mnfstnode)
1181 m = mnfst.read(mnfstnode)
1182 # For every file we care about.
1182 # For every file we care about.
1183 for f in changedfiles:
1183 for f in changedfiles:
1184 fnode = m.get(f, None)
1184 fnode = m.get(f, None)
1185 # If it's in the manifest
1185 # If it's in the manifest
1186 if fnode is not None:
1186 if fnode is not None:
1187 # See comments above.
1187 # See comments above.
1188 clnode = msng_mnfst_set[mnfstnode]
1188 clnode = msng_mnfst_set[mnfstnode]
1189 ndset = msng_filenode_set.setdefault(f, {})
1189 ndset = msng_filenode_set.setdefault(f, {})
1190 ndset.setdefault(fnode, clnode)
1190 ndset.setdefault(fnode, clnode)
1191 # Remember the revision we hope to see next.
1191 # Remember the revision we hope to see next.
1192 next_rev[0] = r + 1
1192 next_rev[0] = r + 1
1193 return collect_msng_filenodes
1193 return collect_msng_filenodes
1194
1194
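# Format note (illustrative): each manifest line is
#   "<filename>\0<40 hex chars of filenode><optional flag>"
# A flag such as 'x' may trail the 40-character hash, which is why
# the collector above slices fnode[:40] before bin()-decoding it.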
1195 # We have a list of filenodes we think we need for a file, let's remove
1195 # We have a list of filenodes we think we need for a file, let's remove
1196 # all those we know the recipient must have.
1196 # all those we know the recipient must have.
1197 def prune_filenodes(f, filerevlog):
1197 def prune_filenodes(f, filerevlog):
1198 msngset = msng_filenode_set[f]
1198 msngset = msng_filenode_set[f]
1199 hasset = {}
1199 hasset = {}
1200 # If a 'missing' filenode thinks it belongs to a changenode we
1200 # If a 'missing' filenode thinks it belongs to a changenode we
1201 # assume the recipient must have, then the recipient must have
1201 # assume the recipient must have, then the recipient must have
1202 # that filenode.
1202 # that filenode.
1203 for n in msngset:
1203 for n in msngset:
1204 clnode = cl.node(filerevlog.linkrev(n))
1204 clnode = cl.node(filerevlog.linkrev(n))
1205 if clnode in has_cl_set:
1205 if clnode in has_cl_set:
1206 hasset[n] = 1
1206 hasset[n] = 1
1207 prune_parents(filerevlog, hasset, msngset)
1207 prune_parents(filerevlog, hasset, msngset)
1208
1208
1209 # A function generating function that sets up a context for the
1209 # A function generating function that sets up a context for the
1210 # inner function.
1210 # inner function.
1211 def lookup_filenode_link_func(fname):
1211 def lookup_filenode_link_func(fname):
1212 msngset = msng_filenode_set[fname]
1212 msngset = msng_filenode_set[fname]
1213 # Lookup the changenode the filenode belongs to.
1213 # Lookup the changenode the filenode belongs to.
1214 def lookup_filenode_link(fnode):
1214 def lookup_filenode_link(fnode):
1215 return msngset[fnode]
1215 return msngset[fnode]
1216 return lookup_filenode_link
1216 return lookup_filenode_link
1217
1217
1218 # Now that we have all these utility functions to help out and
1218 # Now that we have all these utility functions to help out and
1219 # logically divide up the task, generate the group.
1219 # logically divide up the task, generate the group.
1220 def gengroup():
1220 def gengroup():
1221 # The set of changed files starts empty.
1221 # The set of changed files starts empty.
1222 changedfiles = {}
1222 changedfiles = {}
1223 # Create a changenode group generator that will call our functions
1223 # Create a changenode group generator that will call our functions
1224 # back to lookup the owning changenode and collect information.
1224 # back to lookup the owning changenode and collect information.
1225 group = cl.group(msng_cl_lst, identity,
1225 group = cl.group(msng_cl_lst, identity,
1226 manifest_and_file_collector(changedfiles))
1226 manifest_and_file_collector(changedfiles))
1227 for chnk in group:
1227 for chnk in group:
1228 yield chnk
1228 yield chnk
1229
1229
1230 # The list of manifests has been collected by the generator
1230 # The list of manifests has been collected by the generator
1231 # calling our functions back.
1231 # calling our functions back.
1232 prune_manifests()
1232 prune_manifests()
1233 msng_mnfst_lst = msng_mnfst_set.keys()
1233 msng_mnfst_lst = msng_mnfst_set.keys()
1234 # Sort the manifestnodes by revision number.
1234 # Sort the manifestnodes by revision number.
1235 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1235 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1236 # Create a generator for the manifestnodes that calls our lookup
1236 # Create a generator for the manifestnodes that calls our lookup
1237 # and data collection functions back.
1237 # and data collection functions back.
1238 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1238 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1239 filenode_collector(changedfiles))
1239 filenode_collector(changedfiles))
1240 for chnk in group:
1240 for chnk in group:
1241 yield chnk
1241 yield chnk
1242
1242
1243 # These are no longer needed, dereference and toss the memory for
1243 # These are no longer needed, dereference and toss the memory for
1244 # them.
1244 # them.
1245 msng_mnfst_lst = None
1245 msng_mnfst_lst = None
1246 msng_mnfst_set.clear()
1246 msng_mnfst_set.clear()
1247
1247
1248 changedfiles = changedfiles.keys()
1248 changedfiles = changedfiles.keys()
1249 changedfiles.sort()
1249 changedfiles.sort()
1250 # Go through all our files in order sorted by name.
1250 # Go through all our files in order sorted by name.
1251 for fname in changedfiles:
1251 for fname in changedfiles:
1252 filerevlog = self.file(fname)
1252 filerevlog = self.file(fname)
1253 # Toss out the filenodes that the recipient isn't really
1253 # Toss out the filenodes that the recipient isn't really
1254 # missing.
1254 # missing.
1255 if msng_filenode_set.has_key(fname):
1255 if msng_filenode_set.has_key(fname):
1256 prune_filenodes(fname, filerevlog)
1256 prune_filenodes(fname, filerevlog)
1257 msng_filenode_lst = msng_filenode_set[fname].keys()
1257 msng_filenode_lst = msng_filenode_set[fname].keys()
1258 else:
1258 else:
1259 msng_filenode_lst = []
1259 msng_filenode_lst = []
1260 # If any filenodes are left, generate the group for them,
1260 # If any filenodes are left, generate the group for them,
1261 # otherwise don't bother.
1261 # otherwise don't bother.
1262 if len(msng_filenode_lst) > 0:
1262 if len(msng_filenode_lst) > 0:
1263 yield struct.pack(">l", len(fname) + 4) + fname
1263 yield struct.pack(">l", len(fname) + 4) + fname
1264 # Sort the filenodes by their revision #
1264 # Sort the filenodes by their revision #
1265 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1265 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1266 # Create a group generator and only pass in a changenode
1266 # Create a group generator and only pass in a changenode
1267 # lookup function, since we don't need to collect any information
1267 # lookup function, since we don't need to collect any information
1268 # from filenodes.
1268 # from filenodes.
1269 group = filerevlog.group(msng_filenode_lst,
1269 group = filerevlog.group(msng_filenode_lst,
1270 lookup_filenode_link_func(fname))
1270 lookup_filenode_link_func(fname))
1271 for chnk in group:
1271 for chnk in group:
1272 yield chnk
1272 yield chnk
1273 if msng_filenode_set.has_key(fname):
1273 if msng_filenode_set.has_key(fname):
1274 # Don't need this anymore, toss it to free memory.
1274 # Don't need this anymore, toss it to free memory.
1275 del msng_filenode_set[fname]
1275 del msng_filenode_set[fname]
1276 # Signal that no more groups are left.
1276 # Signal that no more groups are left.
1277 yield struct.pack(">l", 0)
1277 yield struct.pack(">l", 0)
1278
1278
1279 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1279 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1280
1280
1281 return util.chunkbuffer(gengroup())
1281 return util.chunkbuffer(gengroup())
1282
1282
1283 def changegroup(self, basenodes, source):
1283 def changegroup(self, basenodes, source):
1284 """Generate a changegroup of all nodes that we have that a recipient
1284 """Generate a changegroup of all nodes that we have that a recipient
1285 doesn't.
1285 doesn't.
1286
1286
1287 This is much easier than the previous function as we can assume that
1287 This is much easier than the previous function as we can assume that
1288 the recipient has any changenode we aren't sending them."""
1288 the recipient has any changenode we aren't sending them."""
1289
1289
1290 self.hook('preoutgoing', throw=True, source=source)
1290 self.hook('preoutgoing', throw=True, source=source)
1291
1291
1292 cl = self.changelog
1292 cl = self.changelog
1293 nodes = cl.nodesbetween(basenodes, None)[0]
1293 nodes = cl.nodesbetween(basenodes, None)[0]
1294 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1294 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1295
1295
1296 def identity(x):
1296 def identity(x):
1297 return x
1297 return x
1298
1298
1299 def gennodelst(revlog):
1299 def gennodelst(revlog):
1300 for r in xrange(0, revlog.count()):
1300 for r in xrange(0, revlog.count()):
1301 n = revlog.node(r)
1301 n = revlog.node(r)
1302 if revlog.linkrev(n) in revset:
1302 if revlog.linkrev(n) in revset:
1303 yield n
1303 yield n
1304
1304
1305 def changed_file_collector(changedfileset):
1305 def changed_file_collector(changedfileset):
1306 def collect_changed_files(clnode):
1306 def collect_changed_files(clnode):
1307 c = cl.read(clnode)
1307 c = cl.read(clnode)
1308 for fname in c[3]:
1308 for fname in c[3]:
1309 changedfileset[fname] = 1
1309 changedfileset[fname] = 1
1310 return collect_changed_files
1310 return collect_changed_files
1311
1311
1312 def lookuprevlink_func(revlog):
1312 def lookuprevlink_func(revlog):
1313 def lookuprevlink(n):
1313 def lookuprevlink(n):
1314 return cl.node(revlog.linkrev(n))
1314 return cl.node(revlog.linkrev(n))
1315 return lookuprevlink
1315 return lookuprevlink
1316
1316
1317 def gengroup():
1317 def gengroup():
1318 # construct a list of all changed files
1318 # construct a list of all changed files
1319 changedfiles = {}
1319 changedfiles = {}
1320
1320
1321 for chnk in cl.group(nodes, identity,
1321 for chnk in cl.group(nodes, identity,
1322 changed_file_collector(changedfiles)):
1322 changed_file_collector(changedfiles)):
1323 yield chnk
1323 yield chnk
1324 changedfiles = changedfiles.keys()
1324 changedfiles = changedfiles.keys()
1325 changedfiles.sort()
1325 changedfiles.sort()
1326
1326
1327 mnfst = self.manifest
1327 mnfst = self.manifest
1328 nodeiter = gennodelst(mnfst)
1328 nodeiter = gennodelst(mnfst)
1329 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1329 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1330 yield chnk
1330 yield chnk
1331
1331
1332 for fname in changedfiles:
1332 for fname in changedfiles:
1333 filerevlog = self.file(fname)
1333 filerevlog = self.file(fname)
1334 nodeiter = gennodelst(filerevlog)
1334 nodeiter = gennodelst(filerevlog)
1335 nodeiter = list(nodeiter)
1335 nodeiter = list(nodeiter)
1336 if nodeiter:
1336 if nodeiter:
1337 yield struct.pack(">l", len(fname) + 4) + fname
1337 yield struct.pack(">l", len(fname) + 4) + fname
1338 lookup = lookuprevlink_func(filerevlog)
1338 lookup = lookuprevlink_func(filerevlog)
1339 for chnk in filerevlog.group(nodeiter, lookup):
1339 for chnk in filerevlog.group(nodeiter, lookup):
1340 yield chnk
1340 yield chnk
1341
1341
1342 yield struct.pack(">l", 0)
1342 yield struct.pack(">l", 0)
1343 self.hook('outgoing', node=hex(nodes[0]), source=source)
1343 self.hook('outgoing', node=hex(nodes[0]), source=source)
1344
1344
1345 return util.chunkbuffer(gengroup())
1345 return util.chunkbuffer(gengroup())
1346
1346
1347 def addchangegroup(self, source):
1347 def addchangegroup(self, source):
1348
1348
1349 def getchunk():
1349 def getchunk():
1350 d = source.read(4)
1350 d = source.read(4)
1351 if not d:
1351 if not d:
1352 return ""
1352 return ""
1353 l = struct.unpack(">l", d)[0]
1353 l = struct.unpack(">l", d)[0]
1354 if l <= 4:
1354 if l <= 4:
1355 return ""
1355 return ""
1356 d = source.read(l - 4)
1356 d = source.read(l - 4)
1357 if len(d) < l - 4:
1357 if len(d) < l - 4:
1358 raise repo.RepoError(_("premature EOF reading chunk"
1358 raise repo.RepoError(_("premature EOF reading chunk"
1359 " (got %d bytes, expected %d)")
1359 " (got %d bytes, expected %d)")
1360 % (len(d), l - 4))
1360 % (len(d), l - 4))
1361 return d
1361 return d
1362
1362
1363 def getgroup():
1363 def getgroup():
1364 while 1:
1364 while 1:
1365 c = getchunk()
1365 c = getchunk()
1366 if not c:
1366 if not c:
1367 break
1367 break
1368 yield c
1368 yield c
1369
1369
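# Wire framing assumed here (illustrative): every chunk is prefixed
# by a 4-byte big-endian length that counts itself, e.g.
#   struct.pack(">l", len(data) + 4) + data
# so a length <= 4 marks an empty chunk, ending the current group;
# an empty chunk where a file name is expected ends the stream.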
1370 def csmap(x):
1370 def csmap(x):
1371 self.ui.debug(_("add changeset %s\n") % short(x))
1371 self.ui.debug(_("add changeset %s\n") % short(x))
1372 return self.changelog.count()
1372 return self.changelog.count()
1373
1373
1374 def revmap(x):
1374 def revmap(x):
1375 return self.changelog.rev(x)
1375 return self.changelog.rev(x)
1376
1376
1377 if not source:
1377 if not source:
1378 return
1378 return
1379
1379
1380 self.hook('prechangegroup', throw=True)
1380 self.hook('prechangegroup', throw=True)
1381
1381
1382 changesets = files = revisions = 0
1382 changesets = files = revisions = 0
1383
1383
1384 tr = self.transaction()
1384 tr = self.transaction()
1385
1385
1386 oldheads = len(self.changelog.heads())
1386 oldheads = len(self.changelog.heads())
1387
1387
1388 # pull off the changeset group
1388 # pull off the changeset group
1389 self.ui.status(_("adding changesets\n"))
1389 self.ui.status(_("adding changesets\n"))
1390 co = self.changelog.tip()
1390 co = self.changelog.tip()
1391 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1391 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1392 cnr, cor = map(self.changelog.rev, (cn, co))
1392 cnr, cor = map(self.changelog.rev, (cn, co))
1393 if cn == nullid:
1393 if cn == nullid:
1394 cnr = cor
1394 cnr = cor
1395 changesets = cnr - cor
1395 changesets = cnr - cor
1396
1396
1397 # pull off the manifest group
1397 # pull off the manifest group
1398 self.ui.status(_("adding manifests\n"))
1398 self.ui.status(_("adding manifests\n"))
1399 mm = self.manifest.tip()
1399 mm = self.manifest.tip()
1400 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1400 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1401
1401
1402 # process the files
1402 # process the files
1403 self.ui.status(_("adding file changes\n"))
1403 self.ui.status(_("adding file changes\n"))
1404 while 1:
1404 while 1:
1405 f = getchunk()
1405 f = getchunk()
1406 if not f:
1406 if not f:
1407 break
1407 break
1408 self.ui.debug(_("adding %s revisions\n") % f)
1408 self.ui.debug(_("adding %s revisions\n") % f)
1409 fl = self.file(f)
1409 fl = self.file(f)
1410 o = fl.count()
1410 o = fl.count()
1411 n = fl.addgroup(getgroup(), revmap, tr)
1411 n = fl.addgroup(getgroup(), revmap, tr)
1412 revisions += fl.count() - o
1412 revisions += fl.count() - o
1413 files += 1
1413 files += 1
1414
1414
1415 newheads = len(self.changelog.heads())
1415 newheads = len(self.changelog.heads())
1416 heads = ""
1416 heads = ""
1417 if oldheads and newheads > oldheads:
1417 if oldheads and newheads > oldheads:
1418 heads = _(" (+%d heads)") % (newheads - oldheads)
1418 heads = _(" (+%d heads)") % (newheads - oldheads)
1419
1419
1420 self.ui.status(_("added %d changesets"
1420 self.ui.status(_("added %d changesets"
1421 " with %d changes to %d files%s\n")
1421 " with %d changes to %d files%s\n")
1422 % (changesets, revisions, files, heads))
1422 % (changesets, revisions, files, heads))
1423
1423
1424 self.hook('pretxnchangegroup', throw=True,
1424 self.hook('pretxnchangegroup', throw=True,
1425 node=hex(self.changelog.node(cor+1)))
1425 node=hex(self.changelog.node(cor+1)))
1426
1426
1427 tr.close()
1427 tr.close()
1428
1428
1429 if changesets > 0:
1429 if changesets > 0:
1430 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1430 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1431
1431
1432 for i in range(cor + 1, cnr + 1):
1432 for i in range(cor + 1, cnr + 1):
1433 self.hook("incoming", node=hex(self.changelog.node(i)))
1433 self.hook("incoming", node=hex(self.changelog.node(i)))
1434
1434
1435 def update(self, node, allow=False, force=False, choose=None,
1435 def update(self, node, allow=False, force=False, choose=None,
1436 moddirstate=True, forcemerge=False, wlock=None):
1436 moddirstate=True, forcemerge=False, wlock=None):
1437 pl = self.dirstate.parents()
1437 pl = self.dirstate.parents()
1438 if not force and pl[1] != nullid:
1438 if not force and pl[1] != nullid:
1439 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1439 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1440 return 1
1440 return 1
1441
1441
1442 err = False
1442 err = False
1443
1443
1444 p1, p2 = pl[0], node
1444 p1, p2 = pl[0], node
1445 pa = self.changelog.ancestor(p1, p2)
1445 pa = self.changelog.ancestor(p1, p2)
1446 m1n = self.changelog.read(p1)[0]
1446 m1n = self.changelog.read(p1)[0]
1447 m2n = self.changelog.read(p2)[0]
1447 m2n = self.changelog.read(p2)[0]
1448 man = self.manifest.ancestor(m1n, m2n)
1448 man = self.manifest.ancestor(m1n, m2n)
1449 m1 = self.manifest.read(m1n)
1449 m1 = self.manifest.read(m1n)
1450 mf1 = self.manifest.readflags(m1n)
1450 mf1 = self.manifest.readflags(m1n)
1451 m2 = self.manifest.read(m2n).copy()
1451 m2 = self.manifest.read(m2n).copy()
1452 mf2 = self.manifest.readflags(m2n)
1452 mf2 = self.manifest.readflags(m2n)
1453 ma = self.manifest.read(man)
1453 ma = self.manifest.read(man)
1454 mfa = self.manifest.readflags(man)
1454 mfa = self.manifest.readflags(man)
1455
1455
1456 modified, added, removed, deleted, unknown = self.changes()
1456 modified, added, removed, deleted, unknown = self.changes()
1457
1457
1458 # is this a jump, or a merge? i.e. is there a linear path
1458 # is this a jump, or a merge? i.e. is there a linear path
1459 # from p1 to p2?
1459 # from p1 to p2?
1460 linear_path = (pa == p1 or pa == p2)
1460 linear_path = (pa == p1 or pa == p2)
1461
1461
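# Illustration: if ancestor(p1, p2) == p1, p2 is a descendant and
# this is a plain forward jump; if it equals p2, we are going back
# in time; only a third, distinct ancestor makes it a branch merge.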
1462 if allow and linear_path:
1462 if allow and linear_path:
1463 raise util.Abort(_("there is nothing to merge, "
1463 raise util.Abort(_("there is nothing to merge, "
1464 "just use 'hg update'"))
1464 "just use 'hg update'"))
1465 if allow and not forcemerge:
1465 if allow and not forcemerge:
1466 if modified or added or removed:
1466 if modified or added or removed:
1467 raise util.Abort(_("outstanding uncommitted changes"))
1467 raise util.Abort(_("outstanding uncommitted changes"))
1468 if not forcemerge and not force:
1468 if not forcemerge and not force:
1469 for f in unknown:
1469 for f in unknown:
1470 if f in m2:
1470 if f in m2:
1471 t1 = self.wread(f)
1471 t1 = self.wread(f)
1472 t2 = self.file(f).read(m2[f])
1472 t2 = self.file(f).read(m2[f])
1473 if cmp(t1, t2) != 0:
1473 if cmp(t1, t2) != 0:
1474 raise util.Abort(_("'%s' already exists in the working"
1474 raise util.Abort(_("'%s' already exists in the working"
1475 " dir and differs from remote") % f)
1475 " dir and differs from remote") % f)
1476
1476
1477 # resolve the manifest to determine which files
1477 # resolve the manifest to determine which files
1478 # we care about merging
1478 # we care about merging
1479 self.ui.note(_("resolving manifests\n"))
1479 self.ui.note(_("resolving manifests\n"))
1480 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1480 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1481 (force, allow, moddirstate, linear_path))
1481 (force, allow, moddirstate, linear_path))
1482 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1482 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1483 (short(man), short(m1n), short(m2n)))
1483 (short(man), short(m1n), short(m2n)))
1484
1484
1485 merge = {}
1485 merge = {}
1486 get = {}
1486 get = {}
1487 remove = []
1487 remove = []
1488
1488
1489 # construct a working dir manifest
1489 # construct a working dir manifest
1490 mw = m1.copy()
1490 mw = m1.copy()
1491 mfw = mf1.copy()
1491 mfw = mf1.copy()
1492 umap = dict.fromkeys(unknown)
1492 umap = dict.fromkeys(unknown)
1493
1493
1494 for f in added + modified + unknown:
1494 for f in added + modified + unknown:
1495 mw[f] = ""
1495 mw[f] = ""
1496 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1496 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1497
1497
1498 if moddirstate and not wlock:
1498 if moddirstate and not wlock:
1499 wlock = self.wlock()
1499 wlock = self.wlock()
1500
1500
1501 for f in deleted + removed:
1501 for f in deleted + removed:
1502 if f in mw:
1502 if f in mw:
1503 del mw[f]
1503 del mw[f]
1504
1504
1505 # If we're jumping between revisions (as opposed to merging),
1505 # If we're jumping between revisions (as opposed to merging),
1506 # and if neither the working directory nor the target rev has
1506 # and if neither the working directory nor the target rev has
1507 # the file, then we need to remove it from the dirstate, to
1507 # the file, then we need to remove it from the dirstate, to
1508 # prevent the dirstate from listing the file when it is no
1508 # prevent the dirstate from listing the file when it is no
1509 # longer in the manifest.
1509 # longer in the manifest.
1510 if moddirstate and linear_path and f not in m2:
1510 if moddirstate and linear_path and f not in m2:
1511 self.dirstate.forget((f,))
1511 self.dirstate.forget((f,))
1512
1512
1513 # Compare manifests
1513 # Compare manifests
1514 for f, n in mw.iteritems():
1514 for f, n in mw.iteritems():
1515 if choose and not choose(f):
1515 if choose and not choose(f):
1516 continue
1516 continue
1517 if f in m2:
1517 if f in m2:
1518 s = 0
1518 s = 0
1519
1519
1520 # is the wfile new since m1, and does it match m2?
1520 # is the wfile new since m1, and does it match m2?
1521 if f not in m1:
1521 if f not in m1:
1522 t1 = self.wread(f)
1522 t1 = self.wread(f)
1523 t2 = self.file(f).read(m2[f])
1523 t2 = self.file(f).read(m2[f])
1524 if cmp(t1, t2) == 0:
1524 if cmp(t1, t2) == 0:
1525 n = m2[f]
1525 n = m2[f]
1526 del t1, t2
1526 del t1, t2
1527
1527
1528 # are files different?
1528 # are files different?
1529 if n != m2[f]:
1529 if n != m2[f]:
1530 a = ma.get(f, nullid)
1530 a = ma.get(f, nullid)
1531 # are both different from the ancestor?
1531 # are both different from the ancestor?
1532 if n != a and m2[f] != a:
1532 if n != a and m2[f] != a:
1533 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1533 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1534 # merge executable bits
1534 # merge executable bits
1535 # "if we changed or they changed, change in merge"
1535 # "if we changed or they changed, change in merge"
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1537 mode = ((a^b) | (a^c)) ^ a
1537 mode = ((a^b) | (a^c)) ^ a
1538 merge[f] = (m1.get(f, nullid), m2[f], mode)
1538 merge[f] = (m1.get(f, nullid), m2[f], mode)
1539 s = 1
1539 s = 1
1540 # are we clobbering?
1540 # are we clobbering?
1541 # is remote's version newer?
1541 # is remote's version newer?
1542 # or are we going back in time?
1542 # or are we going back in time?
1543 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1543 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1544 self.ui.debug(_(" remote %s is newer, get\n") % f)
1544 self.ui.debug(_(" remote %s is newer, get\n") % f)
1545 get[f] = m2[f]
1545 get[f] = m2[f]
1546 s = 1
1546 s = 1
1547 elif f in umap:
1547 elif f in umap:
1548 # this unknown file is the same as the checkout
1548 # this unknown file is the same as the checkout
1549 get[f] = m2[f]
1549 get[f] = m2[f]
1550
1550
1551 if not s and mfw[f] != mf2[f]:
1551 if not s and mfw[f] != mf2[f]:
1552 if force:
1552 if force:
1553 self.ui.debug(_(" updating permissions for %s\n") % f)
1553 self.ui.debug(_(" updating permissions for %s\n") % f)
1554 util.set_exec(self.wjoin(f), mf2[f])
1554 util.set_exec(self.wjoin(f), mf2[f])
1555 else:
1555 else:
1556 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1556 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1557 mode = ((a^b) | (a^c)) ^ a
1557 mode = ((a^b) | (a^c)) ^ a
1558 if mode != b:
1558 if mode != b:
1559 self.ui.debug(_(" updating permissions for %s\n")
1559 self.ui.debug(_(" updating permissions for %s\n")
1560 % f)
1560 % f)
1561 util.set_exec(self.wjoin(f), mode)
1561 util.set_exec(self.wjoin(f), mode)
1562 del m2[f]
1562 del m2[f]
1563 elif f in ma:
1563 elif f in ma:
1564 if n != ma[f]:
1564 if n != ma[f]:
1565 r = _("d")
1565 r = _("d")
1566 if not force and (linear_path or allow):
1566 if not force and (linear_path or allow):
1567 r = self.ui.prompt(
1567 r = self.ui.prompt(
1568 (_(" local changed %s which remote deleted\n") % f) +
1568 (_(" local changed %s which remote deleted\n") % f) +
1569 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1569 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1570 if r == _("d"):
1570 if r == _("d"):
1571 remove.append(f)
1571 remove.append(f)
1572 else:
1572 else:
1573 self.ui.debug(_("other deleted %s\n") % f)
1573 self.ui.debug(_("other deleted %s\n") % f)
1574 remove.append(f) # other deleted it
1574 remove.append(f) # other deleted it
1575 else:
1575 else:
1576 # file is created on branch or in working directory
1576 # file is created on branch or in working directory
1577 if force and f not in umap:
1577 if force and f not in umap:
1578 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1578 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1579 remove.append(f)
1579 remove.append(f)
1580 elif n == m1.get(f, nullid): # same as parent
1580 elif n == m1.get(f, nullid): # same as parent
1581 if p2 == pa: # going backwards?
1581 if p2 == pa: # going backwards?
1582 self.ui.debug(_("remote deleted %s\n") % f)
1582 self.ui.debug(_("remote deleted %s\n") % f)
1583 remove.append(f)
1583 remove.append(f)
1584 else:
1584 else:
1585 self.ui.debug(_("local modified %s, keeping\n") % f)
1585 self.ui.debug(_("local modified %s, keeping\n") % f)
1586 else:
1586 else:
1587 self.ui.debug(_("working dir created %s, keeping\n") % f)
1587 self.ui.debug(_("working dir created %s, keeping\n") % f)
1588
1588
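# Sanity check of the exec-bit rule used above (illustration only;
# a = ancestor bit, b = working dir bit, c = remote bit):
#   for a in (0, 1):
#       for b in (0, 1):
#           for c in (0, 1):
#               mode = ((a ^ b) | (a ^ c)) ^ a
#               assert mode == [c, b][b != a]
# i.e. keep our bit if we changed it, otherwise take theirs.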
1589 for f, n in m2.iteritems():
1589 for f, n in m2.iteritems():
1590 if choose and not choose(f):
1590 if choose and not choose(f):
1591 continue
1591 continue
1592 if f[0] == "/":
1592 if f[0] == "/":
1593 continue
1593 continue
1594 if f in ma and n != ma[f]:
1594 if f in ma and n != ma[f]:
1595 r = _("k")
1595 r = _("k")
1596 if not force and (linear_path or allow):
1596 if not force and (linear_path or allow):
1597 r = self.ui.prompt(
1597 r = self.ui.prompt(
1598 (_("remote changed %s which local deleted\n") % f) +
1598 (_("remote changed %s which local deleted\n") % f) +
1599 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1599 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1600 if r == _("k"):
1600 if r == _("k"):
1601 get[f] = n
1601 get[f] = n
1602 elif f not in ma:
1602 elif f not in ma:
1603 self.ui.debug(_("remote created %s\n") % f)
1603 self.ui.debug(_("remote created %s\n") % f)
1604 get[f] = n
1604 get[f] = n
1605 else:
1605 else:
1606 if force or p2 == pa: # going backwards?
1606 if force or p2 == pa: # going backwards?
1607 self.ui.debug(_("local deleted %s, recreating\n") % f)
1607 self.ui.debug(_("local deleted %s, recreating\n") % f)
1608 get[f] = n
1608 get[f] = n
1609 else:
1609 else:
1610 self.ui.debug(_("local deleted %s\n") % f)
1610 self.ui.debug(_("local deleted %s\n") % f)
1611
1611
1612 del mw, m1, m2, ma
1612 del mw, m1, m2, ma
1613
1613
1614 if force:
1614 if force:
1615 for f in merge:
1615 for f in merge:
1616 get[f] = merge[f][1]
1616 get[f] = merge[f][1]
1617 merge = {}
1617 merge = {}
1618
1618
1619 if linear_path or force:
1619 if linear_path or force:
1620 # we don't need to do any magic, just jump to the new rev
1620 # we don't need to do any magic, just jump to the new rev
1621 branch_merge = False
1621 branch_merge = False
1622 p1, p2 = p2, nullid
1622 p1, p2 = p2, nullid
1623 else:
1623 else:
1624 if not allow:
1624 if not allow:
1625 self.ui.status(_("this update spans a branch"
1625 self.ui.status(_("this update spans a branch"
1626 " affecting the following files:\n"))
1626 " affecting the following files:\n"))
1627 fl = merge.keys() + get.keys()
1627 fl = merge.keys() + get.keys()
1628 fl.sort()
1628 fl.sort()
1629 for f in fl:
1629 for f in fl:
1630 cf = ""
1630 cf = ""
1631 if f in merge:
1631 if f in merge:
1632 cf = _(" (resolve)")
1632 cf = _(" (resolve)")
1633 self.ui.status(" %s%s\n" % (f, cf))
1633 self.ui.status(" %s%s\n" % (f, cf))
1634 self.ui.warn(_("aborting update spanning branches!\n"))
1634 self.ui.warn(_("aborting update spanning branches!\n"))
1635 self.ui.status(_("(use update -m to merge across branches"
1635 self.ui.status(_("(use update -m to merge across branches"
1636 " or -C to lose changes)\n"))
1636 " or -C to lose changes)\n"))
1637 return 1
1637 return 1
1638 branch_merge = True
1638 branch_merge = True
1639
1639
1640 # get the files we don't need to merge, just fetch
1640 # get the files we don't need to merge, just fetch
1641 files = get.keys()
1641 files = get.keys()
1642 files.sort()
1642 files.sort()
1643 for f in files:
1643 for f in files:
1644 if f[0] == "/":
1644 if f[0] == "/":
1645 continue
1645 continue
1646 self.ui.note(_("getting %s\n") % f)
1646 self.ui.note(_("getting %s\n") % f)
1647 t = self.file(f).read(get[f])
1647 t = self.file(f).read(get[f])
1648 self.wwrite(f, t)
1648 self.wwrite(f, t)
1649 util.set_exec(self.wjoin(f), mf2[f])
1649 util.set_exec(self.wjoin(f), mf2[f])
1650 if moddirstate:
1650 if moddirstate:
1651 if branch_merge:
1651 if branch_merge:
1652 self.dirstate.update([f], 'n', st_mtime=-1)
1652 self.dirstate.update([f], 'n', st_mtime=-1)
1653 else:
1653 else:
1654 self.dirstate.update([f], 'n')
1654 self.dirstate.update([f], 'n')
1655
1655
1656 # merge the tricky bits
1656 # merge the tricky bits
1657 files = merge.keys()
1657 files = merge.keys()
1658 files.sort()
1658 files.sort()
1659 for f in files:
1659 for f in files:
1660 self.ui.status(_("merging %s\n") % f)
1660 self.ui.status(_("merging %s\n") % f)
1661 my, other, flag = merge[f]
1661 my, other, flag = merge[f]
1662 ret = self.merge3(f, my, other)
1662 ret = self.merge3(f, my, other)
1663 if ret:
1663 if ret:
1664 err = True
1664 err = True
1665 util.set_exec(self.wjoin(f), flag)
1665 util.set_exec(self.wjoin(f), flag)
1666 if moddirstate:
1666 if moddirstate:
1667 if branch_merge:
1667 if branch_merge:
1668 # We've done a branch merge, mark this file as merged
1668 # We've done a branch merge, mark this file as merged
1669 # so that we properly record the merge later
1669 # so that we properly record the merge later
1670 self.dirstate.update([f], 'm')
1670 self.dirstate.update([f], 'm')
1671 else:
1671 else:
1672 # We've update-merged a locally modified file, so
1672 # We've update-merged a locally modified file, so
1673 # we set the dirstate to emulate a normal checkout
1673 # we set the dirstate to emulate a normal checkout
1674 # of that file some time in the past. Thus our
1674 # of that file some time in the past. Thus our
1675 # merge will appear as a normal local file
1675 # merge will appear as a normal local file
1676 # modification.
1676 # modification.
1677 f_len = len(self.file(f).read(other))
1677 f_len = len(self.file(f).read(other))
1678 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1678 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1679
1679
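# Note on st_mtime=-1 above (illustrative): a recorded mtime of -1
# can never match the file's real stat data, so the next status walk
# falls back to comparing file contents instead of trusting the
# cached stat, which is what a freshly merged file needs.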
1680 remove.sort()
1680 remove.sort()
1681 for f in remove:
1681 for f in remove:
1682 self.ui.note(_("removing %s\n") % f)
1682 self.ui.note(_("removing %s\n") % f)
1683 util.audit_path(f)
1683 util.audit_path(f)
1684 try:
1684 try:
1685 util.unlink(self.wjoin(f))
1685 util.unlink(self.wjoin(f))
1686 except OSError, inst:
1686 except OSError, inst:
1687 if inst.errno != errno.ENOENT:
1687 if inst.errno != errno.ENOENT:
1688 self.ui.warn(_("update failed to remove %s: %s!\n") %
1688 self.ui.warn(_("update failed to remove %s: %s!\n") %
1689 (f, inst.strerror))
1689 (f, inst.strerror))
1690 if moddirstate:
1690 if moddirstate:
1691 if branch_merge:
1691 if branch_merge:
1692 self.dirstate.update(remove, 'r')
1692 self.dirstate.update(remove, 'r')
1693 else:
1693 else:
1694 self.dirstate.forget(remove)
1694 self.dirstate.forget(remove)
1695
1695
1696 if moddirstate:
1696 if moddirstate:
1697 self.dirstate.setparents(p1, p2)
1697 self.dirstate.setparents(p1, p2)
1698 return err
1698 return err
1699
1699
1700 def merge3(self, fn, my, other):
1700 def merge3(self, fn, my, other):
1701 """perform a 3-way merge in the working directory"""
1701 """perform a 3-way merge in the working directory"""
1702
1702
1703 def temp(prefix, node):
1703 def temp(prefix, node):
1704 pre = "%s~%s." % (os.path.basename(fn), prefix)
1704 pre = "%s~%s." % (os.path.basename(fn), prefix)
1705 (fd, name) = tempfile.mkstemp("", pre)
1705 (fd, name) = tempfile.mkstemp("", pre)
1706 f = os.fdopen(fd, "wb")
1706 f = os.fdopen(fd, "wb")
1707 self.wwrite(fn, fl.read(node), f)
1707 self.wwrite(fn, fl.read(node), f)
1708 f.close()
1708 f.close()
1709 return name
1709 return name
1710
1710
1711 fl = self.file(fn)
1711 fl = self.file(fn)
1712 base = fl.ancestor(my, other)
1712 base = fl.ancestor(my, other)
1713 a = self.wjoin(fn)
1713 a = self.wjoin(fn)
1714 b = temp("base", base)
1714 b = temp("base", base)
1715 c = temp("other", other)
1715 c = temp("other", other)
1716
1716
1717 self.ui.note(_("resolving %s\n") % fn)
1717 self.ui.note(_("resolving %s\n") % fn)
1718 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1718 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1719 (fn, short(my), short(other), short(base)))
1719 (fn, short(my), short(other), short(base)))
1720
1720
1721 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1721 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1722 or "hgmerge")
1722 or "hgmerge")
1723 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1723 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1724 if r:
1724 if r:
1725 self.ui.warn(_("merging %s failed!\n") % fn)
1725 self.ui.warn(_("merging %s failed!\n") % fn)
1726
1726
1727 os.unlink(b)
1727 os.unlink(b)
1728 os.unlink(c)
1728 os.unlink(c)
1729 return r
1729 return r
1730
1730
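# Tool resolution in merge3 (illustrative): $HGMERGE from the
# environment wins, then the "ui.merge" config entry, then a bundled
# "hgmerge" script, invoked as
#   <tool> <local> <base> <other>
# with a non-zero exit status reported as a failed merge.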
1731 def verify(self):
1731 def verify(self):
1732 filelinkrevs = {}
1732 filelinkrevs = {}
1733 filenodes = {}
1733 filenodes = {}
1734 changesets = revisions = files = 0
1734 changesets = revisions = files = 0
1735 errors = [0]
1735 errors = [0]
1736 neededmanifests = {}
1736 neededmanifests = {}
1737
1737
1738 def err(msg):
1738 def err(msg):
1739 self.ui.warn(msg + "\n")
1739 self.ui.warn(msg + "\n")
1740 errors[0] += 1
1740 errors[0] += 1
1741
1741
1742 def checksize(obj, name):
1742 def checksize(obj, name):
1743 d = obj.checksize()
1743 d = obj.checksize()
1744 if d[0]:
1744 if d[0]:
1745 err(_("%s data length off by %d bytes") % (name, d[0]))
1745 err(_("%s data length off by %d bytes") % (name, d[0]))
1746 if d[1]:
1746 if d[1]:
1747 err(_("%s index contains %d extra bytes") % (name, d[1]))
1747 err(_("%s index contains %d extra bytes") % (name, d[1]))
1748
1748
1749 seen = {}
1749 seen = {}
1750 self.ui.status(_("checking changesets\n"))
1750 self.ui.status(_("checking changesets\n"))
1751 checksize(self.changelog, "changelog")
1751 checksize(self.changelog, "changelog")
1752
1752
1753 for i in range(self.changelog.count()):
1753 for i in range(self.changelog.count()):
1754 changesets += 1
1754 changesets += 1
1755 n = self.changelog.node(i)
1755 n = self.changelog.node(i)
1756 l = self.changelog.linkrev(n)
1756 l = self.changelog.linkrev(n)
1757 if l != i:
1757 if l != i:
1758 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1758 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1759 if n in seen:
1759 if n in seen:
1760 err(_("duplicate changeset at revision %d") % i)
1760 err(_("duplicate changeset at revision %d") % i)
1761 seen[n] = 1
1761 seen[n] = 1
1762
1762
1763 for p in self.changelog.parents(n):
1763 for p in self.changelog.parents(n):
1764 if p not in self.changelog.nodemap:
1764 if p not in self.changelog.nodemap:
1765 err(_("changeset %s has unknown parent %s") %
1765 err(_("changeset %s has unknown parent %s") %
1766 (short(n), short(p)))
1766 (short(n), short(p)))
1767 try:
1767 try:
1768 changes = self.changelog.read(n)
1768 changes = self.changelog.read(n)
1769 except KeyboardInterrupt:
1769 except KeyboardInterrupt:
1770 self.ui.warn(_("interrupted"))
1770 self.ui.warn(_("interrupted"))
1771 raise
1771 raise
1772 except Exception, inst:
1772 except Exception, inst:
1773 err(_("unpacking changeset %s: %s") % (short(n), inst))
1773 err(_("unpacking changeset %s: %s") % (short(n), inst))
1774 continue
1774
1775
1775 neededmanifests[changes[0]] = n
1776 neededmanifests[changes[0]] = n
1776
1777
1777 for f in changes[3]:
1778 for f in changes[3]:
1778 filelinkrevs.setdefault(f, []).append(i)
1779 filelinkrevs.setdefault(f, []).append(i)
1779
1780
1780 seen = {}
1781 seen = {}
1781 self.ui.status(_("checking manifests\n"))
1782 self.ui.status(_("checking manifests\n"))
1782 checksize(self.manifest, "manifest")
1783 checksize(self.manifest, "manifest")
1783
1784
1784 for i in range(self.manifest.count()):
1785 for i in range(self.manifest.count()):
1785 n = self.manifest.node(i)
1786 n = self.manifest.node(i)
1786 l = self.manifest.linkrev(n)
1787 l = self.manifest.linkrev(n)
1787
1788
1788 if l < 0 or l >= self.changelog.count():
1789 if l < 0 or l >= self.changelog.count():
1789 err(_("bad manifest link (%d) at revision %d") % (l, i))
1790 err(_("bad manifest link (%d) at revision %d") % (l, i))
1790
1791
1791 if n in neededmanifests:
1792 if n in neededmanifests:
1792 del neededmanifests[n]
1793 del neededmanifests[n]
1793
1794
1794 if n in seen:
1795 if n in seen:
1795 err(_("duplicate manifest at revision %d") % i)
1796 err(_("duplicate manifest at revision %d") % i)
1796
1797
1797 seen[n] = 1
1798 seen[n] = 1
1798
1799
1799 for p in self.manifest.parents(n):
1800 for p in self.manifest.parents(n):
1800 if p not in self.manifest.nodemap:
1801 if p not in self.manifest.nodemap:
1801 err(_("manifest %s has unknown parent %s") %
1802 err(_("manifest %s has unknown parent %s") %
1802 (short(n), short(p)))
1803 (short(n), short(p)))
1803
1804
1804 try:
1805 try:
1805 delta = mdiff.patchtext(self.manifest.delta(n))
1806 delta = mdiff.patchtext(self.manifest.delta(n))
1806 except KeyboardInterrupt:
1807 except KeyboardInterrupt:
1807 self.ui.warn(_("interrupted"))
1808 self.ui.warn(_("interrupted"))
1808 raise
1809 raise
1809 except Exception, inst:
1810 except Exception, inst:
1810 err(_("unpacking manifest %s: %s") % (short(n), inst))
1811 err(_("unpacking manifest %s: %s") % (short(n), inst))
1812 continue
1811
1813
1812 ff = [ l.split('\0') for l in delta.splitlines() ]
1814 try:
1813 for f, fn in ff:
1815 ff = [ l.split('\0') for l in delta.splitlines() ]
1814 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1816 for f, fn in ff:
1817 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1818 except (ValueError, TypeError), inst:
1819 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1815
1820
1816 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1821 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1817
1822
1818 for m, c in neededmanifests.items():
1823 for m, c in neededmanifests.items():
1819 err(_("Changeset %s refers to unknown manifest %s") %
1824 err(_("Changeset %s refers to unknown manifest %s") %
1820 (short(m), short(c)))
1825 (short(m), short(c)))
1821 del neededmanifests
1826 del neededmanifests
1822
1827
1823 for f in filenodes:
1828 for f in filenodes:
1824 if f not in filelinkrevs:
1829 if f not in filelinkrevs:
1825 err(_("file %s in manifest but not in changesets") % f)
1830 err(_("file %s in manifest but not in changesets") % f)
1826
1831
1827 for f in filelinkrevs:
1832 for f in filelinkrevs:
1828 if f not in filenodes:
1833 if f not in filenodes:
1829 err(_("file %s in changeset but not in manifest") % f)
1834 err(_("file %s in changeset but not in manifest") % f)
1830
1835
1831 self.ui.status(_("checking files\n"))
1836 self.ui.status(_("checking files\n"))
1832 ff = filenodes.keys()
1837 ff = filenodes.keys()
1833 ff.sort()
1838 ff.sort()
1834 for f in ff:
1839 for f in ff:
1835 if f == "/dev/null":
1840 if f == "/dev/null":
1836 continue
1841 continue
1837 files += 1
1842 files += 1
1843 if not f:
1844 err(_("file without name in manifest %s") % short(n))
1845 continue
1838 fl = self.file(f)
1846 fl = self.file(f)
1839 checksize(fl, f)
1847 checksize(fl, f)
1840
1848
1841 nodes = {nullid: 1}
1849 nodes = {nullid: 1}
1842 seen = {}
1850 seen = {}
1843 for i in range(fl.count()):
1851 for i in range(fl.count()):
1844 revisions += 1
1852 revisions += 1
1845 n = fl.node(i)
1853 n = fl.node(i)
1846
1854
1847 if n in seen:
1855 if n in seen:
1848 err(_("%s: duplicate revision %d") % (f, i))
1856 err(_("%s: duplicate revision %d") % (f, i))
1849 if n not in filenodes[f]:
1857 if n not in filenodes[f]:
1850 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1858 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1851 else:
1859 else:
1852 del filenodes[f][n]
1860 del filenodes[f][n]
1853
1861
1854 flr = fl.linkrev(n)
1862 flr = fl.linkrev(n)
1855 if flr not in filelinkrevs[f]:
1863 if flr not in filelinkrevs.get(f, []):
1856 err(_("%s:%s points to unexpected changeset %d")
1864 err(_("%s:%s points to unexpected changeset %d")
1857 % (f, short(n), flr))
1865 % (f, short(n), flr))
1858 else:
1866 else:
1859 filelinkrevs[f].remove(flr)
1867 filelinkrevs[f].remove(flr)
1860
1868
1861 # verify contents
1869 # verify contents
1862 try:
1870 try:
1863 t = fl.read(n)
1871 t = fl.read(n)
1864 except KeyboardInterrupt:
1872 except KeyboardInterrupt:
1865 self.ui.warn(_("interrupted"))
1873 self.ui.warn(_("interrupted"))
1866 raise
1874 raise
1867 except Exception, inst:
1875 except Exception, inst:
1868 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1876 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1869
1877
1870 # verify parents
1878 # verify parents
1871 (p1, p2) = fl.parents(n)
1879 (p1, p2) = fl.parents(n)
1872 if p1 not in nodes:
1880 if p1 not in nodes:
1873 err(_("file %s:%s unknown parent 1 %s") %
1881 err(_("file %s:%s unknown parent 1 %s") %
1874 (f, short(n), short(p1)))
1882 (f, short(n), short(p1)))
1875 if p2 not in nodes:
1883 if p2 not in nodes:
1876 err(_("file %s:%s unknown parent 2 %s") %
1884 err(_("file %s:%s unknown parent 2 %s") %
1877 (f, short(n), short(p2)))
1885 (f, short(n), short(p2)))
1878 nodes[n] = 1
1886 nodes[n] = 1
1879
1887
1880 # cross-check
1888 # cross-check
1881 for node in filenodes[f]:
1889 for node in filenodes[f]:
1882 err(_("node %s in manifests not in %s") % (hex(node), f))
1890 err(_("node %s in manifests not in %s") % (hex(node), f))
1883
1891
1884 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1892 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1885 (files, changesets, revisions))
1893 (files, changesets, revisions))
1886
1894
1887 if errors[0]:
1895 if errors[0]:
1888 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1896 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1889 return 1
1897 return 1
1890
1898
1891 # used to avoid circular references so destructors work
1899 # used to avoid circular references so destructors work
1892 def aftertrans(base):
1900 def aftertrans(base):
1893 p = base
1901 p = base
1894 def a():
1902 def a():
1895 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1903 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1896 util.rename(os.path.join(p, "journal.dirstate"),
1904 util.rename(os.path.join(p, "journal.dirstate"),
1897 os.path.join(p, "undo.dirstate"))
1905 os.path.join(p, "undo.dirstate"))
1898 return a
1906 return a
1899
1907
@@ -1,882 +1,882 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii errno heapq mdiff os sha struct zlib")
16 demandload(globals(), "binascii errno heapq mdiff os sha struct zlib")
17
17
18 def hash(text, p1, p2):
18 def hash(text, p1, p2):
19 """generate a hash from the given text and its parent hashes
19 """generate a hash from the given text and its parent hashes
20
20
21 This hash combines both the current file contents and its history
21 This hash combines both the current file contents and its history
22 in a manner that makes it easy to distinguish nodes with the same
22 in a manner that makes it easy to distinguish nodes with the same
23 content in the revision graph.
23 content in the revision graph.
24 """
24 """
25 l = [p1, p2]
25 l = [p1, p2]
26 l.sort()
26 l.sort()
27 s = sha.new(l[0])
27 s = sha.new(l[0])
28 s.update(l[1])
28 s.update(l[1])
29 s.update(text)
29 s.update(text)
30 return s.digest()
30 return s.digest()
31
31
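As an aside for readers of this diff, the node hash is easy to reproduce standalone. A minimal sketch, assuming Python 3 byte strings and hashlib.sha1 in place of the long-deprecated sha module (nullid here is a stand-in for node.nullid):

    import hashlib

    def node_hash(text, p1, p2):
        # parent ids are sorted so the hash is independent of parent order
        l = sorted([p1, p2])
        s = hashlib.sha1(l[0])
        s.update(l[1])
        s.update(text)
        return s.digest()          # 20 raw bytes: the revlog nodeid

    nullid = b"\0" * 20
    assert len(node_hash(b"hello\n", nullid, nullid)) == 20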
32 def compress(text):
32 def compress(text):
33 """ generate a possibly-compressed representation of text """
33 """ generate a possibly-compressed representation of text """
34 if not text: return ("", text)
34 if not text: return ("", text)
35 if len(text) < 44:
35 if len(text) < 44:
36 if text[0] == '\0': return ("", text)
36 if text[0] == '\0': return ("", text)
37 return ('u', text)
37 return ('u', text)
38 bin = zlib.compress(text)
38 bin = zlib.compress(text)
39 if len(bin) > len(text):
39 if len(bin) > len(text):
40 if text[0] == '\0': return ("", text)
40 if text[0] == '\0': return ("", text)
41 return ('u', text)
41 return ('u', text)
42 return ("", bin)
42 return ("", bin)
43
43
44 def decompress(bin):
44 def decompress(bin):
45 """ decompress the given input """
45 """ decompress the given input """
46 if not bin: return bin
46 if not bin: return bin
47 t = bin[0]
47 t = bin[0]
48 if t == '\0': return bin
48 if t == '\0': return bin
49 if t == 'x': return zlib.decompress(bin)
49 if t == 'x': return zlib.decompress(bin)
50 if t == 'u': return bin[1:]
50 if t == 'u': return bin[1:]
51 raise RevlogError(_("unknown compression type %s") % t)
51 raise RevlogError(_("unknown compression type %r") % t)
52
52
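A short illustration of the tagging scheme above (a hedged sketch, not part of the module): zlib output begins with an 'x' byte, which doubles as the compression tag, so only uncompressed data needs an explicit 'u' prefix, and data that already starts with NUL is stored verbatim.

    import zlib

    text = b"a" * 100
    bin = zlib.compress(text)
    assert bin[:1] == b"x"              # zlib header byte is the type tag
    assert zlib.decompress(bin) == text

    stored = b"u" + b"incompressible"   # short/incompressible data: 'u' + text
    assert stored[1:] == b"incompressible"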
53 indexformat = ">4l20s20s20s"
53 indexformat = ">4l20s20s20s"
54
54
55 class lazyparser(object):
55 class lazyparser(object):
56 """
56 """
57 this class avoids the need to parse the entirety of large indices
57 this class avoids the need to parse the entirety of large indices
58
58
59 By default we parse and load 1000 entries at a time.
59 By default we parse and load 1000 entries at a time.
60
60
61 If no position is specified, we load the whole index, and replace
61 If no position is specified, we load the whole index, and replace
62 the lazy objects in revlog with the underlying objects for
62 the lazy objects in revlog with the underlying objects for
63 efficiency in cases where we look at most of the nodes.
63 efficiency in cases where we look at most of the nodes.
64 """
64 """
65 def __init__(self, data, revlog):
65 def __init__(self, data, revlog):
66 self.data = data
66 self.data = data
67 self.s = struct.calcsize(indexformat)
67 self.s = struct.calcsize(indexformat)
68 self.l = len(data)/self.s
68 self.l = len(data)/self.s
69 self.index = [None] * self.l
69 self.index = [None] * self.l
70 self.map = {nullid: -1}
70 self.map = {nullid: -1}
71 self.all = 0
71 self.all = 0
72 self.revlog = revlog
72 self.revlog = revlog
73
73
74 def trunc(self, pos):
74 def trunc(self, pos):
75 self.l = pos/self.s
75 self.l = pos/self.s
76
76
77 def load(self, pos=None):
77 def load(self, pos=None):
78 if self.all: return
78 if self.all: return
79 if pos is not None:
79 if pos is not None:
80 block = pos / 1000
80 block = pos / 1000
81 i = block * 1000
81 i = block * 1000
82 end = min(self.l, i + 1000)
82 end = min(self.l, i + 1000)
83 else:
83 else:
84 self.all = 1
84 self.all = 1
85 i = 0
85 i = 0
86 end = self.l
86 end = self.l
87 self.revlog.index = self.index
87 self.revlog.index = self.index
88 self.revlog.nodemap = self.map
88 self.revlog.nodemap = self.map
89
89
90 while i < end:
90 while i < end:
91 d = self.data[i * self.s: (i + 1) * self.s]
91 d = self.data[i * self.s: (i + 1) * self.s]
92 e = struct.unpack(indexformat, d)
92 e = struct.unpack(indexformat, d)
93 self.index[i] = e
93 self.index[i] = e
94 self.map[e[6]] = i
94 self.map[e[6]] = i
95 i += 1
95 i += 1
96
96
97 class lazyindex(object):
97 class lazyindex(object):
98 """a lazy version of the index array"""
98 """a lazy version of the index array"""
99 def __init__(self, parser):
99 def __init__(self, parser):
100 self.p = parser
100 self.p = parser
101 def __len__(self):
101 def __len__(self):
102 return len(self.p.index)
102 return len(self.p.index)
103 def load(self, pos):
103 def load(self, pos):
104 if pos < 0:
104 if pos < 0:
105 pos += len(self.p.index)
105 pos += len(self.p.index)
106 self.p.load(pos)
106 self.p.load(pos)
107 return self.p.index[pos]
107 return self.p.index[pos]
108 def __getitem__(self, pos):
108 def __getitem__(self, pos):
109 return self.p.index[pos] or self.load(pos)
109 return self.p.index[pos] or self.load(pos)
110 def __delitem__(self, pos):
110 def __delitem__(self, pos):
111 del self.p.index[pos]
111 del self.p.index[pos]
112 def append(self, e):
112 def append(self, e):
113 self.p.index.append(e)
113 self.p.index.append(e)
114 def trunc(self, pos):
114 def trunc(self, pos):
115 self.p.trunc(pos)
115 self.p.trunc(pos)
116
116
117 class lazymap(object):
117 class lazymap(object):
118 """a lazy version of the node map"""
118 """a lazy version of the node map"""
119 def __init__(self, parser):
119 def __init__(self, parser):
120 self.p = parser
120 self.p = parser
121 def load(self, key):
121 def load(self, key):
122 if self.p.all: return
122 if self.p.all: return
123 n = self.p.data.find(key)
123 n = self.p.data.find(key)
124 if n < 0:
124 if n < 0:
125 raise KeyError(key)
125 raise KeyError(key)
126 pos = n / self.p.s
126 pos = n / self.p.s
127 self.p.load(pos)
127 self.p.load(pos)
128 def __contains__(self, key):
128 def __contains__(self, key):
129 self.p.load()
129 self.p.load()
130 return key in self.p.map
130 return key in self.p.map
131 def __iter__(self):
131 def __iter__(self):
132 yield nullid
132 yield nullid
133 for i in xrange(self.p.l):
133 for i in xrange(self.p.l):
134 try:
134 try:
135 yield self.p.index[i][6]
135 yield self.p.index[i][6]
136 except:
136 except:
137 self.p.load(i)
137 self.p.load(i)
138 yield self.p.index[i][6]
138 yield self.p.index[i][6]
139 def __getitem__(self, key):
139 def __getitem__(self, key):
140 try:
140 try:
141 return self.p.map[key]
141 return self.p.map[key]
142 except KeyError:
142 except KeyError:
143 try:
143 try:
144 self.load(key)
144 self.load(key)
145 return self.p.map[key]
145 return self.p.map[key]
146 except KeyError:
146 except KeyError:
147 raise KeyError("node " + hex(key))
147 raise KeyError("node " + hex(key))
148 def __setitem__(self, key, val):
148 def __setitem__(self, key, val):
149 self.p.map[key] = val
149 self.p.map[key] = val
150 def __delitem__(self, key):
150 def __delitem__(self, key):
151 del self.p.map[key]
151 del self.p.map[key]
152
152
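For orientation, a sketch of the fixed-size record both lazy parsers slice out of the index file; the field order matches the comment in revlog.load further down, and the values here are invented:

    import struct

    indexformat = ">4l20s20s20s"    # offset, size, base, linkrev, p1, p2, node
    assert struct.calcsize(indexformat) == 76

    nullid = b"\0" * 20
    rec = struct.pack(indexformat, 0, 42, 0, 7, nullid, nullid, b"\x01" * 20)
    offset, size, base, linkrev, p1, p2, node = struct.unpack(indexformat, rec)
    assert (size, linkrev) == (42, 7)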
153 class RevlogError(Exception): pass
153 class RevlogError(Exception): pass
154
154
155 class revlog(object):
155 class revlog(object):
156 """
156 """
157 the underlying revision storage object
157 the underlying revision storage object
158
158
159 A revlog consists of two parts, an index and the revision data.
159 A revlog consists of two parts, an index and the revision data.
160
160
161 The index is a file with a fixed record size containing
161 The index is a file with a fixed record size containing
162 information on each revision, including its nodeid (hash), the
162 information on each revision, including its nodeid (hash), the
163 nodeids of its parents, the position and offset of its data within
163 nodeids of its parents, the position and offset of its data within
164 the data file, and the revision it's based on. Finally, each entry
164 the data file, and the revision it's based on. Finally, each entry
165 contains a linkrev entry that can serve as a pointer to external
165 contains a linkrev entry that can serve as a pointer to external
166 data.
166 data.
167
167
168 The revision data itself is a linear collection of data chunks.
168 The revision data itself is a linear collection of data chunks.
169 Each chunk represents a revision and is usually represented as a
169 Each chunk represents a revision and is usually represented as a
170 delta against the previous chunk. To bound lookup time, runs of
170 delta against the previous chunk. To bound lookup time, runs of
171 deltas are limited to about 2 times the length of the original
171 deltas are limited to about 2 times the length of the original
172 version data. This makes retrieval of a version proportional to
172 version data. This makes retrieval of a version proportional to
173 its size, or O(1) relative to the number of revisions.
173 its size, or O(1) relative to the number of revisions.
174
174
175 Both pieces of the revlog are written to in an append-only
175 Both pieces of the revlog are written to in an append-only
176 fashion, which means we never need to rewrite a file to insert or
176 fashion, which means we never need to rewrite a file to insert or
177 remove data, and can use some simple techniques to avoid the need
177 remove data, and can use some simple techniques to avoid the need
178 for locking while reading.
178 for locking while reading.
179 """
179 """
180 def __init__(self, opener, indexfile, datafile):
180 def __init__(self, opener, indexfile, datafile):
181 """
181 """
182 create a revlog object
182 create a revlog object
183
183
184 opener is a function that abstracts the file opening operation
184 opener is a function that abstracts the file opening operation
185 and can be used to implement COW semantics or the like.
185 and can be used to implement COW semantics or the like.
186 """
186 """
187 self.indexfile = indexfile
187 self.indexfile = indexfile
188 self.datafile = datafile
188 self.datafile = datafile
189 self.opener = opener
189 self.opener = opener
190
190
191 self.indexstat = None
191 self.indexstat = None
192 self.cache = None
192 self.cache = None
193 self.chunkcache = None
193 self.chunkcache = None
194 self.load()
194 self.load()
195
195
196 def load(self):
196 def load(self):
197 try:
197 try:
198 f = self.opener(self.indexfile)
198 f = self.opener(self.indexfile)
199 except IOError, inst:
199 except IOError, inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 i = ""
202 i = ""
203 else:
203 else:
204 try:
204 try:
205 st = os.fstat(f.fileno())
205 st = os.fstat(f.fileno())
206 except AttributeError, inst:
206 except AttributeError, inst:
207 st = None
207 st = None
208 else:
208 else:
209 oldst = self.indexstat
209 oldst = self.indexstat
210 if (oldst and st.st_dev == oldst.st_dev
210 if (oldst and st.st_dev == oldst.st_dev
211 and st.st_ino == oldst.st_ino
211 and st.st_ino == oldst.st_ino
212 and st.st_mtime == oldst.st_mtime
212 and st.st_mtime == oldst.st_mtime
213 and st.st_ctime == oldst.st_ctime):
213 and st.st_ctime == oldst.st_ctime):
214 return
214 return
215 self.indexstat = st
215 self.indexstat = st
216 i = f.read()
216 i = f.read()
217
217
218 if i and i[:4] != "\0\0\0\0":
218 if i and i[:4] != "\0\0\0\0":
219 raise RevlogError(_("incompatible revlog signature on %s") %
219 raise RevlogError(_("incompatible revlog signature on %s") %
220 self.indexfile)
220 self.indexfile)
221
221
222 if len(i) > 10000:
222 if len(i) > 10000:
223 # big index, let's parse it on demand
223 # big index, let's parse it on demand
224 parser = lazyparser(i, self)
224 parser = lazyparser(i, self)
225 self.index = lazyindex(parser)
225 self.index = lazyindex(parser)
226 self.nodemap = lazymap(parser)
226 self.nodemap = lazymap(parser)
227 else:
227 else:
228 s = struct.calcsize(indexformat)
228 s = struct.calcsize(indexformat)
229 l = len(i) / s
229 l = len(i) / s
230 self.index = [None] * l
230 self.index = [None] * l
231 m = [None] * l
231 m = [None] * l
232
232
233 n = 0
233 n = 0
234 for f in xrange(0, l * s, s):
234 for f in xrange(0, l * s, s):
235 # offset, size, base, linkrev, p1, p2, nodeid
235 # offset, size, base, linkrev, p1, p2, nodeid
236 e = struct.unpack(indexformat, i[f:f + s])
236 e = struct.unpack(indexformat, i[f:f + s])
237 m[n] = (e[6], n)
237 m[n] = (e[6], n)
238 self.index[n] = e
238 self.index[n] = e
239 n += 1
239 n += 1
240
240
241 self.nodemap = dict(m)
241 self.nodemap = dict(m)
242 self.nodemap[nullid] = -1
242 self.nodemap[nullid] = -1
243
243
244 def tip(self): return self.node(len(self.index) - 1)
244 def tip(self): return self.node(len(self.index) - 1)
245 def count(self): return len(self.index)
245 def count(self): return len(self.index)
246 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
246 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
247 def rev(self, node):
247 def rev(self, node):
248 try:
248 try:
249 return self.nodemap[node]
249 return self.nodemap[node]
250 except KeyError:
250 except KeyError:
251 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
251 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
252 def linkrev(self, node): return self.index[self.rev(node)][3]
252 def linkrev(self, node): return self.index[self.rev(node)][3]
253 def parents(self, node):
253 def parents(self, node):
254 if node == nullid: return (nullid, nullid)
254 if node == nullid: return (nullid, nullid)
255 return self.index[self.rev(node)][4:6]
255 return self.index[self.rev(node)][4:6]
256
256
257 def start(self, rev): return self.index[rev][0]
257 def start(self, rev): return self.index[rev][0]
258 def length(self, rev): return self.index[rev][1]
258 def length(self, rev): return self.index[rev][1]
259 def end(self, rev): return self.start(rev) + self.length(rev)
259 def end(self, rev): return self.start(rev) + self.length(rev)
260 def base(self, rev): return self.index[rev][2]
260 def base(self, rev): return self.index[rev][2]
261
261
262 def reachable(self, rev, stop=None):
262 def reachable(self, rev, stop=None):
263 reachable = {}
263 reachable = {}
264 visit = [rev]
264 visit = [rev]
265 reachable[rev] = 1
265 reachable[rev] = 1
266 if stop:
266 if stop:
267 stopn = self.rev(stop)
267 stopn = self.rev(stop)
268 else:
268 else:
269 stopn = 0
269 stopn = 0
270 while visit:
270 while visit:
271 n = visit.pop(0)
271 n = visit.pop(0)
272 if n == stop:
272 if n == stop:
273 continue
273 continue
274 if n == nullid:
274 if n == nullid:
275 continue
275 continue
276 for p in self.parents(n):
276 for p in self.parents(n):
277 if self.rev(p) < stopn:
277 if self.rev(p) < stopn:
278 continue
278 continue
279 if p not in reachable:
279 if p not in reachable:
280 reachable[p] = 1
280 reachable[p] = 1
281 visit.append(p)
281 visit.append(p)
282 return reachable
282 return reachable
283
283
284 def nodesbetween(self, roots=None, heads=None):
284 def nodesbetween(self, roots=None, heads=None):
285 """Return a tuple containing three elements. Elements 1 and 2 contain
285 """Return a tuple containing three elements. Elements 1 and 2 contain
286 a final list of bases and heads after all the unreachable ones have been
286 a final list of bases and heads after all the unreachable ones have been
287 pruned. Element 0 contains a topologically sorted list of all
287 pruned. Element 0 contains a topologically sorted list of all
288 nodes that satisfy these constraints:
288 nodes that satisfy these constraints:
289
289
290 1. All nodes must be descended from a node in roots (the nodes on
290 1. All nodes must be descended from a node in roots (the nodes on
291 roots are considered descended from themselves).
291 roots are considered descended from themselves).
292 2. All nodes must also be ancestors of a node in heads (the nodes in
292 2. All nodes must also be ancestors of a node in heads (the nodes in
293 heads are considered to be their own ancestors).
293 heads are considered to be their own ancestors).
294
294
295 If roots is unspecified, nullid is assumed as the only root.
295 If roots is unspecified, nullid is assumed as the only root.
296 If heads is unspecified, it is taken to be the output of the
296 If heads is unspecified, it is taken to be the output of the
297 heads method (i.e. a list of all nodes in the repository that
297 heads method (i.e. a list of all nodes in the repository that
298 have no children)."""
298 have no children)."""
299 nonodes = ([], [], [])
299 nonodes = ([], [], [])
300 if roots is not None:
300 if roots is not None:
301 roots = list(roots)
301 roots = list(roots)
302 if not roots:
302 if not roots:
303 return nonodes
303 return nonodes
304 lowestrev = min([self.rev(n) for n in roots])
304 lowestrev = min([self.rev(n) for n in roots])
305 else:
305 else:
306 roots = [nullid] # Everybody's a descendent of nullid
306 roots = [nullid] # Everybody's a descendent of nullid
307 lowestrev = -1
307 lowestrev = -1
308 if (lowestrev == -1) and (heads is None):
308 if (lowestrev == -1) and (heads is None):
309 # We want _all_ the nodes!
309 # We want _all_ the nodes!
310 return ([self.node(r) for r in xrange(0, self.count())],
310 return ([self.node(r) for r in xrange(0, self.count())],
311 [nullid], list(self.heads()))
311 [nullid], list(self.heads()))
312 if heads is None:
312 if heads is None:
313 # All nodes are ancestors, so the latest ancestor is the last
313 # All nodes are ancestors, so the latest ancestor is the last
314 # node.
314 # node.
315 highestrev = self.count() - 1
315 highestrev = self.count() - 1
316 # Set ancestors to None to signal that every node is an ancestor.
316 # Set ancestors to None to signal that every node is an ancestor.
317 ancestors = None
317 ancestors = None
318 # Set heads to an empty dictionary for later discovery of heads
318 # Set heads to an empty dictionary for later discovery of heads
319 heads = {}
319 heads = {}
320 else:
320 else:
321 heads = list(heads)
321 heads = list(heads)
322 if not heads:
322 if not heads:
323 return nonodes
323 return nonodes
324 ancestors = {}
324 ancestors = {}
325 # Start at the top and keep marking parents until we're done.
325 # Start at the top and keep marking parents until we're done.
326 nodestotag = heads[:]
326 nodestotag = heads[:]
327 # Turn heads into a dictionary so we can remove 'fake' heads.
327 # Turn heads into a dictionary so we can remove 'fake' heads.
328 # Also, later we will be using it to filter out the heads we can't
328 # Also, later we will be using it to filter out the heads we can't
329 # find from roots.
329 # find from roots.
330 heads = dict.fromkeys(heads, 0)
330 heads = dict.fromkeys(heads, 0)
331 # Remember where the top was so we can use it as a limit later.
331 # Remember where the top was so we can use it as a limit later.
332 highestrev = max([self.rev(n) for n in nodestotag])
332 highestrev = max([self.rev(n) for n in nodestotag])
333 while nodestotag:
333 while nodestotag:
334 # grab a node to tag
334 # grab a node to tag
335 n = nodestotag.pop()
335 n = nodestotag.pop()
336 # Never tag nullid
336 # Never tag nullid
337 if n == nullid:
337 if n == nullid:
338 continue
338 continue
339 # A node's revision number represents its place in a
339 # A node's revision number represents its place in a
340 # topologically sorted list of nodes.
340 # topologically sorted list of nodes.
341 r = self.rev(n)
341 r = self.rev(n)
342 if r >= lowestrev:
342 if r >= lowestrev:
343 if n not in ancestors:
343 if n not in ancestors:
344 # If we are possibly a descendent of one of the roots
344 # If we are possibly a descendent of one of the roots
345 # and we haven't already been marked as an ancestor
345 # and we haven't already been marked as an ancestor
346 ancestors[n] = 1 # Mark as ancestor
346 ancestors[n] = 1 # Mark as ancestor
347 # Add non-nullid parents to list of nodes to tag.
347 # Add non-nullid parents to list of nodes to tag.
348 nodestotag.extend([p for p in self.parents(n) if
348 nodestotag.extend([p for p in self.parents(n) if
349 p != nullid])
349 p != nullid])
350 elif n in heads: # We've seen it before, is it a fake head?
350 elif n in heads: # We've seen it before, is it a fake head?
351 # So it is, real heads should not be the ancestors of
351 # So it is, real heads should not be the ancestors of
352 # any other heads.
352 # any other heads.
353 heads.pop(n)
353 heads.pop(n)
354 if not ancestors:
354 if not ancestors:
355 return nonodes
355 return nonodes
356 # Now that we have our set of ancestors, we want to remove any
356 # Now that we have our set of ancestors, we want to remove any
357 # roots that are not ancestors.
357 # roots that are not ancestors.
358
358
359 # If one of the roots was nullid, everything is included anyway.
359 # If one of the roots was nullid, everything is included anyway.
360 if lowestrev > -1:
360 if lowestrev > -1:
361 # But, since we weren't, let's recompute the lowest rev to not
361 # But, since we weren't, let's recompute the lowest rev to not
362 # include roots that aren't ancestors.
362 # include roots that aren't ancestors.
363
363
364 # Filter out roots that aren't ancestors of heads
364 # Filter out roots that aren't ancestors of heads
365 roots = [n for n in roots if n in ancestors]
365 roots = [n for n in roots if n in ancestors]
366 # Recompute the lowest revision
366 # Recompute the lowest revision
367 if roots:
367 if roots:
368 lowestrev = min([self.rev(n) for n in roots])
368 lowestrev = min([self.rev(n) for n in roots])
369 else:
369 else:
370 # No more roots? Return empty list
370 # No more roots? Return empty list
371 return nonodes
371 return nonodes
372 else:
372 else:
373 # We are descending from nullid, and don't need to care about
373 # We are descending from nullid, and don't need to care about
374 # any other roots.
374 # any other roots.
375 lowestrev = -1
375 lowestrev = -1
376 roots = [nullid]
376 roots = [nullid]
377 # Transform our roots list into a 'set' (i.e. a dictionary where the
377 # Transform our roots list into a 'set' (i.e. a dictionary where the
378 # values don't matter).
378 # values don't matter).
379 descendents = dict.fromkeys(roots, 1)
379 descendents = dict.fromkeys(roots, 1)
380 # Also, keep the original roots so we can filter out roots that aren't
380 # Also, keep the original roots so we can filter out roots that aren't
381 # 'real' roots (i.e. are descended from other roots).
381 # 'real' roots (i.e. are descended from other roots).
382 roots = descendents.copy()
382 roots = descendents.copy()
383 # Our topologically sorted list of output nodes.
383 # Our topologically sorted list of output nodes.
384 orderedout = []
384 orderedout = []
385 # Don't start at nullid since we don't want nullid in our output list,
385 # Don't start at nullid since we don't want nullid in our output list,
386 # and if nullid shows up in descendents, empty parents will look like
386 # and if nullid shows up in descendents, empty parents will look like
387 # they're descendents.
387 # they're descendents.
388 for r in xrange(max(lowestrev, 0), highestrev + 1):
388 for r in xrange(max(lowestrev, 0), highestrev + 1):
389 n = self.node(r)
389 n = self.node(r)
390 isdescendent = False
390 isdescendent = False
391 if lowestrev == -1: # Everybody is a descendent of nullid
391 if lowestrev == -1: # Everybody is a descendent of nullid
392 isdescendent = True
392 isdescendent = True
393 elif n in descendents:
393 elif n in descendents:
394 # n is already a descendent
394 # n is already a descendent
395 isdescendent = True
395 isdescendent = True
396 # This check only needs to be done here because all the roots
396 # This check only needs to be done here because all the roots
397 # will start being marked as descendents before the loop.
397 # will start being marked as descendents before the loop.
398 if n in roots:
398 if n in roots:
399 # If n was a root, check if it's a 'real' root.
399 # If n was a root, check if it's a 'real' root.
400 p = tuple(self.parents(n))
400 p = tuple(self.parents(n))
401 # If any of its parents are descendents, it's not a root.
401 # If any of its parents are descendents, it's not a root.
402 if (p[0] in descendents) or (p[1] in descendents):
402 if (p[0] in descendents) or (p[1] in descendents):
403 roots.pop(n)
403 roots.pop(n)
404 else:
404 else:
405 p = tuple(self.parents(n))
405 p = tuple(self.parents(n))
406 # A node is a descendent if either of its parents are
406 # A node is a descendent if either of its parents are
407 # descendents. (We seeded the dependents list with the roots
407 # descendents. (We seeded the dependents list with the roots
408 # up there, remember?)
408 # up there, remember?)
409 if (p[0] in descendents) or (p[1] in descendents):
409 if (p[0] in descendents) or (p[1] in descendents):
410 descendents[n] = 1
410 descendents[n] = 1
411 isdescendent = True
411 isdescendent = True
412 if isdescendent and ((ancestors is None) or (n in ancestors)):
412 if isdescendent and ((ancestors is None) or (n in ancestors)):
413 # Only include nodes that are both descendents and ancestors.
413 # Only include nodes that are both descendents and ancestors.
414 orderedout.append(n)
414 orderedout.append(n)
415 if (ancestors is not None) and (n in heads):
415 if (ancestors is not None) and (n in heads):
416 # We're trying to figure out which heads are reachable
416 # We're trying to figure out which heads are reachable
417 # from roots.
417 # from roots.
418 # Mark this head as having been reached
418 # Mark this head as having been reached
419 heads[n] = 1
419 heads[n] = 1
420 elif ancestors is None:
420 elif ancestors is None:
421 # Otherwise, we're trying to discover the heads.
421 # Otherwise, we're trying to discover the heads.
422 # Assume this is a head because if it isn't, the next step
422 # Assume this is a head because if it isn't, the next step
423 # will eventually remove it.
423 # will eventually remove it.
424 heads[n] = 1
424 heads[n] = 1
425 # But, obviously its parents aren't.
425 # But, obviously its parents aren't.
426 for p in self.parents(n):
426 for p in self.parents(n):
427 heads.pop(p, None)
427 heads.pop(p, None)
428 heads = [n for n in heads.iterkeys() if heads[n] != 0]
428 heads = [n for n in heads.iterkeys() if heads[n] != 0]
429 roots = roots.keys()
429 roots = roots.keys()
430 assert orderedout
430 assert orderedout
431 assert roots
431 assert roots
432 assert heads
432 assert heads
433 return (orderedout, roots, heads)
433 return (orderedout, roots, heads)
434
434
435 def heads(self, start=None):
435 def heads(self, start=None):
436 """return the list of all nodes that have no children
436 """return the list of all nodes that have no children
437
437
438 if start is specified, only heads that are descendants of
438 if start is specified, only heads that are descendants of
439 start will be returned
439 start will be returned
440
440
441 """
441 """
442 if start is None:
442 if start is None:
443 start = nullid
443 start = nullid
444 reachable = {start: 1}
444 reachable = {start: 1}
445 heads = {start: 1}
445 heads = {start: 1}
446 startrev = self.rev(start)
446 startrev = self.rev(start)
447
447
448 for r in xrange(startrev + 1, self.count()):
448 for r in xrange(startrev + 1, self.count()):
449 n = self.node(r)
449 n = self.node(r)
450 for pn in self.parents(n):
450 for pn in self.parents(n):
451 if pn in reachable:
451 if pn in reachable:
452 reachable[n] = 1
452 reachable[n] = 1
453 heads[n] = 1
453 heads[n] = 1
454 if pn in heads:
454 if pn in heads:
455 del heads[pn]
455 del heads[pn]
456 return heads.keys()
456 return heads.keys()
457
457
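The single forward sweep in heads() can be mimicked on a toy graph. A hedged sketch, encoding revisions as rev -> (p1, p2) with -1 standing in for nullid:

    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1)}
    reachable, heads = {-1}, {-1}
    for r in sorted(parents):          # revision order is topological
        for p in parents[r]:
            if p in reachable:
                reachable.add(r)
                heads.add(r)           # assume head until a child shows up
                heads.discard(p)       # a parent can no longer be a head
    heads.discard(-1)
    assert heads == {2, 3}             # the two childless revisions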
458 def children(self, node):
458 def children(self, node):
459 """find the children of a given node"""
459 """find the children of a given node"""
460 c = []
460 c = []
461 p = self.rev(node)
461 p = self.rev(node)
462 for r in range(p + 1, self.count()):
462 for r in range(p + 1, self.count()):
463 n = self.node(r)
463 n = self.node(r)
464 for pn in self.parents(n):
464 for pn in self.parents(n):
465 if pn == node:
465 if pn == node:
466 c.append(n)
466 c.append(n)
467 continue
467 continue
468 elif pn == nullid:
468 elif pn == nullid:
469 continue
469 continue
470 return c
470 return c
471
471
472 def lookup(self, id):
472 def lookup(self, id):
473 """locate a node based on revision number or subset of hex nodeid"""
473 """locate a node based on revision number or subset of hex nodeid"""
474 try:
474 try:
475 rev = int(id)
475 rev = int(id)
476 if str(rev) != id: raise ValueError
476 if str(rev) != id: raise ValueError
477 if rev < 0: rev = self.count() + rev
477 if rev < 0: rev = self.count() + rev
478 if rev < 0 or rev >= self.count(): raise ValueError
478 if rev < 0 or rev >= self.count(): raise ValueError
479 return self.node(rev)
479 return self.node(rev)
480 except (ValueError, OverflowError):
480 except (ValueError, OverflowError):
481 c = []
481 c = []
482 for n in self.nodemap:
482 for n in self.nodemap:
483 if hex(n).startswith(id):
483 if hex(n).startswith(id):
484 c.append(n)
484 c.append(n)
485 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
485 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
486 if len(c) < 1: raise RevlogError(_("No match found"))
486 if len(c) < 1: raise RevlogError(_("No match found"))
487 return c[0]
487 return c[0]
488
488
489 return None
489 return None
490
490
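The resolution order above, sketched as usage against a hypothetical open revlog rl (the instance and values are invented, so the calls are left as comments):

    # n = rl.lookup("0")     # decimal string -> revision number
    # n = rl.lookup("-1")    # negative numbers count back from tip
    # n = rl.lookup("1e4f")  # anything else: unambiguous hex-nodeid prefix;
    #                        # raises RevlogError on ambiguity or no match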
491 def diff(self, a, b):
491 def diff(self, a, b):
492 """return a delta between two revisions"""
492 """return a delta between two revisions"""
493 return mdiff.textdiff(a, b)
493 return mdiff.textdiff(a, b)
494
494
495 def patches(self, t, pl):
495 def patches(self, t, pl):
496 """apply a list of patches to a string"""
496 """apply a list of patches to a string"""
497 return mdiff.patches(t, pl)
497 return mdiff.patches(t, pl)
498
498
499 def chunk(self, rev):
499 def chunk(self, rev):
500 start, length = self.start(rev), self.length(rev)
500 start, length = self.start(rev), self.length(rev)
501 end = start + length
501 end = start + length
502
502
503 def loadcache():
503 def loadcache():
504 cache_length = max(4096 * 1024, length) # 4 MB
504 cache_length = max(4096 * 1024, length) # 4 MB
505 df = self.opener(self.datafile)
505 df = self.opener(self.datafile)
506 df.seek(start)
506 df.seek(start)
507 self.chunkcache = (start, df.read(cache_length))
507 self.chunkcache = (start, df.read(cache_length))
508
508
509 if not self.chunkcache:
509 if not self.chunkcache:
510 loadcache()
510 loadcache()
511
511
512 cache_start = self.chunkcache[0]
512 cache_start = self.chunkcache[0]
513 cache_end = cache_start + len(self.chunkcache[1])
513 cache_end = cache_start + len(self.chunkcache[1])
514 if start >= cache_start and end <= cache_end:
514 if start >= cache_start and end <= cache_end:
515 # it is cached
515 # it is cached
516 offset = start - cache_start
516 offset = start - cache_start
517 else:
517 else:
518 loadcache()
518 loadcache()
519 offset = 0
519 offset = 0
520
520
521 #def checkchunk():
521 #def checkchunk():
522 # df = self.opener(self.datafile)
522 # df = self.opener(self.datafile)
523 # df.seek(start)
523 # df.seek(start)
524 # return df.read(length)
524 # return df.read(length)
525 #assert s == checkchunk()
525 #assert s == checkchunk()
526 return decompress(self.chunkcache[1][offset:offset + length])
526 return decompress(self.chunkcache[1][offset:offset + length])
527
527
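The cache test above reduces to an interval check; a minimal sketch (the function name is mine):

    def in_cache(cache_start, cache_len, start, length):
        # a chunk is served from memory only if it lies wholly in the window
        return start >= cache_start and start + length <= cache_start + cache_len

    assert in_cache(0, 4096 * 1024, 100, 50)   # inside the 4 MB window
    assert not in_cache(0, 1024, 1000, 50)     # spills past the end -> reload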
528 def delta(self, node):
528 def delta(self, node):
529 """return or calculate a delta between a node and its predecessor"""
529 """return or calculate a delta between a node and its predecessor"""
530 r = self.rev(node)
530 r = self.rev(node)
531 b = self.base(r)
531 b = self.base(r)
532 if r == b:
532 if r == b:
533 return self.diff(self.revision(self.node(r - 1)),
533 return self.diff(self.revision(self.node(r - 1)),
534 self.revision(node))
534 self.revision(node))
535 else:
535 else:
536 return self.chunk(r)
536 return self.chunk(r)
537
537
538 def revision(self, node):
538 def revision(self, node):
539 """return an uncompressed revision of a given"""
539 """return an uncompressed revision of a given"""
540 if node == nullid: return ""
540 if node == nullid: return ""
541 if self.cache and self.cache[0] == node: return self.cache[2]
541 if self.cache and self.cache[0] == node: return self.cache[2]
542
542
543 # look up what we need to read
543 # look up what we need to read
544 text = None
544 text = None
545 rev = self.rev(node)
545 rev = self.rev(node)
546 base = self.base(rev)
546 base = self.base(rev)
547
547
548 # do we have useful data cached?
548 # do we have useful data cached?
549 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
549 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
550 base = self.cache[1]
550 base = self.cache[1]
551 text = self.cache[2]
551 text = self.cache[2]
552 else:
552 else:
553 text = self.chunk(base)
553 text = self.chunk(base)
554
554
555 bins = []
555 bins = []
556 for r in xrange(base + 1, rev + 1):
556 for r in xrange(base + 1, rev + 1):
557 bins.append(self.chunk(r))
557 bins.append(self.chunk(r))
558
558
559 text = mdiff.patches(text, bins)
559 text = mdiff.patches(text, bins)
560
560
561 p1, p2 = self.parents(node)
561 p1, p2 = self.parents(node)
562 if node != hash(text, p1, p2):
562 if node != hash(text, p1, p2):
563 raise RevlogError(_("integrity check failed on %s:%d")
563 raise RevlogError(_("integrity check failed on %s:%d")
564 % (self.datafile, rev))
564 % (self.datafile, rev))
565
565
566 self.cache = (node, rev, text)
566 self.cache = (node, rev, text)
567 return text
567 return text
568
568
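The cache test in revision() amounts to picking where the patch chain starts. A hedged sketch (names are mine):

    def chain_start(cached_rev, base, rev):
        # reuse cached text when it sits on this chain strictly before rev
        if cached_rev is not None and base <= cached_rev < rev:
            return cached_rev
        return base

    assert chain_start(5, 3, 8) == 5   # patch forward from the cached text
    assert chain_start(9, 3, 8) == 3   # cache is at/past rev: start at base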
569 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
569 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
570 """add a revision to the log
570 """add a revision to the log
571
571
572 text - the revision data to add
572 text - the revision data to add
573 transaction - the transaction object used for rollback
573 transaction - the transaction object used for rollback
574 link - the linkrev data to add
574 link - the linkrev data to add
575 p1, p2 - the parent nodeids of the revision
575 p1, p2 - the parent nodeids of the revision
576 d - an optional precomputed delta
576 d - an optional precomputed delta
577 """
577 """
578 if text is None: text = ""
578 if text is None: text = ""
579 if p1 is None: p1 = self.tip()
579 if p1 is None: p1 = self.tip()
580 if p2 is None: p2 = nullid
580 if p2 is None: p2 = nullid
581
581
582 node = hash(text, p1, p2)
582 node = hash(text, p1, p2)
583
583
584 if node in self.nodemap:
584 if node in self.nodemap:
585 return node
585 return node
586
586
587 n = self.count()
587 n = self.count()
588 t = n - 1
588 t = n - 1
589
589
590 if n:
590 if n:
591 base = self.base(t)
591 base = self.base(t)
592 start = self.start(base)
592 start = self.start(base)
593 end = self.end(t)
593 end = self.end(t)
594 if not d:
594 if not d:
595 prev = self.revision(self.tip())
595 prev = self.revision(self.tip())
596 d = self.diff(prev, str(text))
596 d = self.diff(prev, str(text))
597 data = compress(d)
597 data = compress(d)
598 l = len(data[1]) + len(data[0])
598 l = len(data[1]) + len(data[0])
599 dist = end - start + l
599 dist = end - start + l
600
600
601 # full versions are inserted when the needed deltas
601 # full versions are inserted when the needed deltas
602 # become comparable to the uncompressed text
602 # become comparable to the uncompressed text
603 if not n or dist > len(text) * 2:
603 if not n or dist > len(text) * 2:
604 data = compress(text)
604 data = compress(text)
605 l = len(data[1]) + len(data[0])
605 l = len(data[1]) + len(data[0])
606 base = n
606 base = n
607 else:
607 else:
608 base = self.base(t)
608 base = self.base(t)
609
609
610 offset = 0
610 offset = 0
611 if t >= 0:
611 if t >= 0:
612 offset = self.end(t)
612 offset = self.end(t)
613
613
614 e = (offset, l, base, link, p1, p2, node)
614 e = (offset, l, base, link, p1, p2, node)
615
615
616 self.index.append(e)
616 self.index.append(e)
617 self.nodemap[node] = n
617 self.nodemap[node] = n
618 entry = struct.pack(indexformat, *e)
618 entry = struct.pack(indexformat, *e)
619
619
620 transaction.add(self.datafile, e[0])
620 transaction.add(self.datafile, e[0])
621 f = self.opener(self.datafile, "a")
621 f = self.opener(self.datafile, "a")
622 if data[0]:
622 if data[0]:
623 f.write(data[0])
623 f.write(data[0])
624 f.write(data[1])
624 f.write(data[1])
625 transaction.add(self.indexfile, n * len(entry))
625 transaction.add(self.indexfile, n * len(entry))
626 self.opener(self.indexfile, "a").write(entry)
626 self.opener(self.indexfile, "a").write(entry)
627
627
628 self.cache = (node, n, text)
628 self.cache = (node, n, text)
629 return node
629 return node
630
630
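The snapshot rule above, isolated as a hedged sketch: a full version replaces the delta once the chain since the last snapshot would exceed twice the size of the text being added.

    def store_full(chain_bytes, delta_len, text_len):
        # chain_bytes covers start(base) .. end(tip) already on disk
        return chain_bytes + delta_len > text_len * 2

    assert store_full(1000, 500, 700)       # 1500 > 1400: write a snapshot
    assert not store_full(100, 50, 700)     # 150 <= 1400: extend the chain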
631 def ancestor(self, a, b):
631 def ancestor(self, a, b):
632 """calculate the least common ancestor of nodes a and b"""
632 """calculate the least common ancestor of nodes a and b"""
633 # calculate the distance of every node from root
633 # calculate the distance of every node from root
634 dist = {nullid: 0}
634 dist = {nullid: 0}
635 for i in xrange(self.count()):
635 for i in xrange(self.count()):
636 n = self.node(i)
636 n = self.node(i)
637 p1, p2 = self.parents(n)
637 p1, p2 = self.parents(n)
638 dist[n] = max(dist[p1], dist[p2]) + 1
638 dist[n] = max(dist[p1], dist[p2]) + 1
639
639
640 # traverse ancestors in order of decreasing distance from root
640 # traverse ancestors in order of decreasing distance from root
641 def ancestors(node):
641 def ancestors(node):
642 # we store negative distances because heap returns smallest member
642 # we store negative distances because heap returns smallest member
643 h = [(-dist[node], node)]
643 h = [(-dist[node], node)]
644 seen = {}
644 seen = {}
645 while h:
645 while h:
646 d, n = heapq.heappop(h)
646 d, n = heapq.heappop(h)
647 if n not in seen:
647 if n not in seen:
648 seen[n] = 1
648 seen[n] = 1
649 yield (-d, n)
649 yield (-d, n)
650 for p in self.parents(n):
650 for p in self.parents(n):
651 heapq.heappush(h, (-dist[p], p))
651 heapq.heappush(h, (-dist[p], p))
652
652
653 def generations(node):
653 def generations(node):
654 sg, s = None, {}
654 sg, s = None, {}
655 for g,n in ancestors(node):
655 for g,n in ancestors(node):
656 if g != sg:
656 if g != sg:
657 if sg:
657 if sg:
658 yield sg, s
658 yield sg, s
659 sg, s = g, {n:1}
659 sg, s = g, {n:1}
660 else:
660 else:
661 s[n] = 1
661 s[n] = 1
662 yield sg, s
662 yield sg, s
663
663
664 x = generations(a)
664 x = generations(a)
665 y = generations(b)
665 y = generations(b)
666 gx = x.next()
666 gx = x.next()
667 gy = y.next()
667 gy = y.next()
668
668
669 # increment each ancestor list until it is closer to root than
669 # increment each ancestor list until it is closer to root than
670 # the other, or they match
670 # the other, or they match
671 while 1:
671 while 1:
672 #print "ancestor gen %s %s" % (gx[0], gy[0])
672 #print "ancestor gen %s %s" % (gx[0], gy[0])
673 if gx[0] == gy[0]:
673 if gx[0] == gy[0]:
674 # find the intersection
674 # find the intersection
675 i = [ n for n in gx[1] if n in gy[1] ]
675 i = [ n for n in gx[1] if n in gy[1] ]
676 if i:
676 if i:
677 return i[0]
677 return i[0]
678 else:
678 else:
679 #print "next"
679 #print "next"
680 gy = y.next()
680 gy = y.next()
681 gx = x.next()
681 gx = x.next()
682 elif gx[0] < gy[0]:
682 elif gx[0] < gy[0]:
683 #print "next y"
683 #print "next y"
684 gy = y.next()
684 gy = y.next()
685 else:
685 else:
686 #print "next x"
686 #print "next x"
687 gx = x.next()
687 gx = x.next()
688
688
689 def group(self, nodelist, lookup, infocollect=None):
689 def group(self, nodelist, lookup, infocollect=None):
690 """calculate a delta group
690 """calculate a delta group
691
691
692 Given a list of changeset revs, return a set of deltas and
692 Given a list of changeset revs, return a set of deltas and
693 metadata corresponding to nodes. The first delta is
693 metadata corresponding to nodes. The first delta is
694 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
694 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
695 have this parent as it has all history before these
695 have this parent as it has all history before these
696 changesets. The parent here is parents(nodes[0])[0].
696 changesets. The parent here is parents(nodes[0])[0].
697 """
697 """
698 revs = [self.rev(n) for n in nodelist]
698 revs = [self.rev(n) for n in nodelist]
699
699
700 # if we don't have any revisions touched by these changesets, bail
700 # if we don't have any revisions touched by these changesets, bail
701 if not revs:
701 if not revs:
702 yield struct.pack(">l", 0)
702 yield struct.pack(">l", 0)
703 return
703 return
704
704
705 # add the parent of the first rev
705 # add the parent of the first rev
706 p = self.parents(self.node(revs[0]))[0]
706 p = self.parents(self.node(revs[0]))[0]
707 revs.insert(0, self.rev(p))
707 revs.insert(0, self.rev(p))
708
708
709 # build deltas
709 # build deltas
710 for d in xrange(0, len(revs) - 1):
710 for d in xrange(0, len(revs) - 1):
711 a, b = revs[d], revs[d + 1]
711 a, b = revs[d], revs[d + 1]
712 na = self.node(a)
712 na = self.node(a)
713 nb = self.node(b)
713 nb = self.node(b)
714
714
715 if infocollect is not None:
715 if infocollect is not None:
716 infocollect(nb)
716 infocollect(nb)
717
717
718 # do we need to construct a new delta?
718 # do we need to construct a new delta?
719 if a + 1 != b or self.base(b) == b:
719 if a + 1 != b or self.base(b) == b:
720 ta = self.revision(na)
720 ta = self.revision(na)
721 tb = self.revision(nb)
721 tb = self.revision(nb)
722 d = self.diff(ta, tb)
722 d = self.diff(ta, tb)
723 else:
723 else:
724 d = self.chunk(b)
724 d = self.chunk(b)
725
725
726 p = self.parents(nb)
726 p = self.parents(nb)
727 meta = nb + p[0] + p[1] + lookup(nb)
727 meta = nb + p[0] + p[1] + lookup(nb)
728 l = struct.pack(">l", len(meta) + len(d) + 4)
728 l = struct.pack(">l", len(meta) + len(d) + 4)
729 yield l
729 yield l
730 yield meta
730 yield meta
731 yield d
731 yield d
732
732
733 yield struct.pack(">l", 0)
733 yield struct.pack(">l", 0)
734
734
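The framing emitted by group() (and consumed by addgroup() below) can be reproduced standalone; a sketch with invented ids:

    import struct

    def frame(node, p1, p2, cs, delta):
        meta = node + p1 + p2 + cs          # four 20-byte ids = 80-byte header
        return struct.pack(">l", len(meta) + len(delta) + 4) + meta + delta

    nullid = b"\0" * 20
    chunk = frame(b"\x01" * 20, nullid, nullid, b"\x02" * 20, b"<delta>")
    (length,) = struct.unpack(">l", chunk[:4])
    assert length == len(chunk)             # the length counts its own 4 bytes
    terminator = struct.pack(">l", 0)       # a zero length ends the stream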
735 def addgroup(self, revs, linkmapper, transaction, unique=0):
735 def addgroup(self, revs, linkmapper, transaction, unique=0):
736 """
736 """
737 add a delta group
737 add a delta group
738
738
739 given a set of deltas, add them to the revision log. the
739 given a set of deltas, add them to the revision log. the
740 first delta is against its parent, which should be in our
740 first delta is against its parent, which should be in our
741 log, the rest are against the previous delta.
741 log, the rest are against the previous delta.
742 """
742 """
743
743
744 #track the base of the current delta log
744 #track the base of the current delta log
745 r = self.count()
745 r = self.count()
746 t = r - 1
746 t = r - 1
747 node = nullid
747 node = nullid
748
748
749 base = prev = -1
749 base = prev = -1
750 start = end = measure = 0
750 start = end = measure = 0
751 if r:
751 if r:
752 base = self.base(t)
752 base = self.base(t)
753 start = self.start(base)
753 start = self.start(base)
754 end = self.end(t)
754 end = self.end(t)
755 measure = self.length(base)
755 measure = self.length(base)
756 prev = self.tip()
756 prev = self.tip()
757
757
758 transaction.add(self.datafile, end)
758 transaction.add(self.datafile, end)
759 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
759 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
760 dfh = self.opener(self.datafile, "a")
760 dfh = self.opener(self.datafile, "a")
761 ifh = self.opener(self.indexfile, "a")
761 ifh = self.opener(self.indexfile, "a")
762
762
763 # loop through our set of deltas
763 # loop through our set of deltas
764 chain = None
764 chain = None
765 for chunk in revs:
765 for chunk in revs:
766 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
766 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
767 link = linkmapper(cs)
767 link = linkmapper(cs)
768 if node in self.nodemap:
768 if node in self.nodemap:
769 # this can happen if two branches make the same change
769 # this can happen if two branches make the same change
770 # if unique:
770 # if unique:
771 # raise RevlogError(_("already have %s") % hex(node[:4]))
771 # raise RevlogError(_("already have %s") % hex(node[:4]))
772 chain = node
772 chain = node
773 continue
773 continue
774 delta = chunk[80:]
774 delta = chunk[80:]
775
775
776 for p in (p1, p2):
776 for p in (p1, p2):
777 if not p in self.nodemap:
777 if not p in self.nodemap:
778 raise RevlogError(_("unknown parent %s") % short(p1))
778 raise RevlogError(_("unknown parent %s") % short(p1))
779
779
780 if not chain:
780 if not chain:
781 # retrieve the parent revision of the delta chain
781 # retrieve the parent revision of the delta chain
782 chain = p1
782 chain = p1
783 if not chain in self.nodemap:
783 if not chain in self.nodemap:
784 raise RevlogError(_("unknown base %s") % short(chain[:4]))
784 raise RevlogError(_("unknown base %s") % short(chain[:4]))
785
785
786 # full versions are inserted when the needed deltas become
786 # full versions are inserted when the needed deltas become
787 # comparable to the uncompressed text or when the previous
787 # comparable to the uncompressed text or when the previous
788 # version is not the one we have a delta against. We use
788 # version is not the one we have a delta against. We use
789 # the size of the previous full rev as a proxy for the
789 # the size of the previous full rev as a proxy for the
790 # current size.
790 # current size.
791
791
792 if chain == prev:
792 if chain == prev:
793 tempd = compress(delta)
793 tempd = compress(delta)
794 cdelta = tempd[0] + tempd[1]
794 cdelta = tempd[0] + tempd[1]
795
795
796 if chain != prev or (end - start + len(cdelta)) > measure * 2:
796 if chain != prev or (end - start + len(cdelta)) > measure * 2:
797 # flush our writes here so we can read it in revision
797 # flush our writes here so we can read it in revision
798 dfh.flush()
798 dfh.flush()
799 ifh.flush()
799 ifh.flush()
800 text = self.revision(chain)
800 text = self.revision(chain)
801 text = self.patches(text, [delta])
801 text = self.patches(text, [delta])
802 chk = self.addrevision(text, transaction, link, p1, p2)
802 chk = self.addrevision(text, transaction, link, p1, p2)
803 if chk != node:
803 if chk != node:
804 raise RevlogError(_("consistency error adding group"))
804 raise RevlogError(_("consistency error adding group"))
805 measure = len(text)
805 measure = len(text)
806 else:
806 else:
807 e = (end, len(cdelta), base, link, p1, p2, node)
807 e = (end, len(cdelta), base, link, p1, p2, node)
808 self.index.append(e)
808 self.index.append(e)
809 self.nodemap[node] = r
809 self.nodemap[node] = r
810 dfh.write(cdelta)
810 dfh.write(cdelta)
811 ifh.write(struct.pack(indexformat, *e))
811 ifh.write(struct.pack(indexformat, *e))
812
812
813 t, r, chain, prev = r, r + 1, node, node
813 t, r, chain, prev = r, r + 1, node, node
814 base = self.base(t)
814 base = self.base(t)
815 start = self.start(base)
815 start = self.start(base)
816 end = self.end(t)
816 end = self.end(t)
817
817
818 dfh.close()
818 dfh.close()
819 ifh.close()
819 ifh.close()
820 return node
820 return node
821
821
822 def strip(self, rev, minlink):
822 def strip(self, rev, minlink):
823 if self.count() == 0 or rev >= self.count():
823 if self.count() == 0 or rev >= self.count():
824 return
824 return
825
825
826 # When stripping away a revision, we need to make sure it
826 # When stripping away a revision, we need to make sure it
827 # does not actually belong to an older changeset.
827 # does not actually belong to an older changeset.
828 # The minlink parameter defines the oldest revision
828 # The minlink parameter defines the oldest revision
829 # we're allowed to strip away.
829 # we're allowed to strip away.
830 while minlink > self.index[rev][3]:
830 while minlink > self.index[rev][3]:
831 rev += 1
831 rev += 1
832 if rev >= self.count():
832 if rev >= self.count():
833 return
833 return
834
834
835 # first truncate the files on disk
835 # first truncate the files on disk
836 end = self.start(rev)
836 end = self.start(rev)
837 self.opener(self.datafile, "a").truncate(end)
837 self.opener(self.datafile, "a").truncate(end)
838 end = rev * struct.calcsize(indexformat)
838 end = rev * struct.calcsize(indexformat)
839 self.opener(self.indexfile, "a").truncate(end)
839 self.opener(self.indexfile, "a").truncate(end)
840
840
841 # then reset internal state in memory to forget those revisions
841 # then reset internal state in memory to forget those revisions
842 self.cache = None
842 self.cache = None
843 self.chunkcache = None
843 self.chunkcache = None
844 for p in self.index[rev:]:
844 for p in self.index[rev:]:
845 del self.nodemap[p[6]]
845 del self.nodemap[p[6]]
846 del self.index[rev:]
846 del self.index[rev:]
847
847
848 # truncating the lazyindex also truncates the lazymap.
848 # truncating the lazyindex also truncates the lazymap.
849 if isinstance(self.index, lazyindex):
849 if isinstance(self.index, lazyindex):
850 self.index.trunc(end)
850 self.index.trunc(end)
851
851
852
852
853 def checksize(self):
853 def checksize(self):
854 expected = 0
854 expected = 0
855 if self.count():
855 if self.count():
856 expected = self.end(self.count() - 1)
856 expected = self.end(self.count() - 1)
857
857
858 try:
858 try:
859 f = self.opener(self.datafile)
859 f = self.opener(self.datafile)
860 f.seek(0, 2)
860 f.seek(0, 2)
861 actual = f.tell()
861 actual = f.tell()
862 dd = actual - expected
862 dd = actual - expected
863 except IOError, inst:
863 except IOError, inst:
864 if inst.errno != errno.ENOENT:
864 if inst.errno != errno.ENOENT:
865 raise
865 raise
866 dd = 0
866 dd = 0
867
867
868 try:
868 try:
869 f = self.opener(self.indexfile)
869 f = self.opener(self.indexfile)
870 f.seek(0, 2)
870 f.seek(0, 2)
871 actual = f.tell()
871 actual = f.tell()
872 s = struct.calcsize(indexformat)
872 s = struct.calcsize(indexformat)
873 i = actual / s
873 i = actual / s
874 di = actual - (i * s)
874 di = actual - (i * s)
875 except IOError, inst:
875 except IOError, inst:
876 if inst.errno != errno.ENOENT:
876 if inst.errno != errno.ENOENT:
877 raise
877 raise
878 di = 0
878 di = 0
879
879
880 return (dd, di)
880 return (dd, di)
881
881
882
882
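Interpreting checksize's return, sketched against a hypothetical open revlog rl: dd is how many bytes the data file differs from what the last index entry implies, and di is the count of trailing index bytes that do not form a whole record.

    # dd, di = rl.checksize()
    # if dd:
    #     print("data file off by %d bytes" % dd)
    # if di:
    #     print("index has %d stray trailing bytes" % di)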