This changes the revlog.group and re-implements the localrepo.changegroup...
Eric Hopper -
r1458:1033892b default
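The relationship between the two entry points, as an editor's sketch (hypothetical driver code, not part of the changeset; ui, path, bases, heads, and other are placeholders): both produce the same length-prefixed chunk stream that addchangegroup() consumes, changegroupsubset() just restricts it to the changesets between the given bases and heads.

    repo = localrepository(ui, path)

    # full stream rooted at some base nodes, as pull() and push() use it
    cg = repo.changegroup(bases)

    # restricted stream: only the changesets between bases and heads
    cg = repo.changegroupsubset(bases, heads)

    # the consumer side is the same either way
    other.addchangegroup(cg)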
@@ -1,1420 +1,1562 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import struct, os, util
import filelog, manifest, changelog, dirstate, repo
from node import *
from demandload import *
demandload(globals(), "re lock transaction tempfile stat mdiff errno")

class localrepository:
    def __init__(self, ui, path=None, create=0):
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp: raise repo.RepoError("no repo found")
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            self.ui.readconfig(self.opener("hgrc"))
        except IOError: pass

    def hook(self, name, **args):
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(s)
            os.chdir(olddir)

            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
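    # Illustrative example (editor's note, not part of this change): with an
    # hgrc section such as
    #     [hooks]
    #     commit = echo committed $NODE
    # a call like self.hook("commit", node=hex(n)) exports NODE into the
    # environment, runs the command from self.root, and restores the previous
    # environment afterwards.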

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass

            try:
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r,t,n))
        l.sort()
        return [(t,n) for r,t,n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t,n in self.tags().items():
                self.nodetagscache.setdefault(n,[]).append(t)
        return self.nodetagscache.get(node, [])

    def lookup(self, key):
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError("unknown revision '%s'" % key)

    def dev(self):
        return os.stat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/': f = f[1:]
        return filelog.filelog(self.opener, f)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
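    # Illustrative example (editor's note): a hypothetical hgrc entry such as
    #     [encode]
    #     *.txt = some-filter-command
    # makes wread() pipe matching files through the command when reading from
    # the working directory; a matching [decode] entry reverses it in wwrite().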

    def transaction(self):
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
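    # Editor's note: on a successful transaction close, after() renames the
    # journal to "undo" and journal.dirstate to "undo.dirstate", which is the
    # state undo() replays; an interrupted run leaves "journal" in place for
    # recover() to roll back.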

    def recover(self):
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")

    def undo(self):
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")

    def lock(self, wait=1):
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst

    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)

    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if not meta and t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    new[f] = fp1
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n

    def walk(self, node=None, files=[], match=util.always):
        if node:
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                if match(fn): yield 'm', fn
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn

    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        mf2, u = None, []

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)

    def add(self, list):
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn("%s does not exist!\n" % f)
            elif not os.path.isfile(p):
                self.ui.warn("%s not added: only files supported currently\n" % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn("%s already tracked!\n" % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list):
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn("%s not added!\n" % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list):
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn("%s still exists!\n" % f)
            elif self.dirstate.state(f) == 'a':
                self.ui.warn("%s never committed!\n" % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn("%s not tracked!\n" % f)
            else:
                self.dirstate.update([f], "r")

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn("%s does not exist!\n" % dest)
        elif not os.path.isfile(p):
            self.ui.warn("copy failed: %s is not a file\n" % dest)
        else:
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self):
        return self.changelog.heads()

    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes: nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
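    # Illustrative example (editor's note): for a 100-changeset linear range,
    # between() records the nodes 1, 2, 4, 8, ... steps below top, so each
    # query returns O(log n) sample points for findincoming() to narrow on.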

    def findincoming(self, remote, base=None, heads=None):
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status("searching for changes\n")

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug("found base node %s\n" % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError("already have changeset " + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn("warning: pulling from an unrelated repository!\n")

        self.ui.note("found new changesets starting at " +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return fetch.keys()
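    # Editor's note: discovery runs in two phases - remote.branches() walks
    # linear segments back from the remote heads until known nodes appear,
    # then remote.between() binary-searches each partially known segment for
    # the first unknown changeset.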

    def findoutgoing(self, remote, base=None, heads=None):
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset

    def pull(self, remote):
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status("requesting all changes\n")
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status("no changes found\n")
            return 1

        cg = remote.changegroup(fetch)
        return self.addchangegroup(cg)

    def push(self, remote, force=False):
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn("abort: unsynced remote changes!\n")
            self.ui.status("(did you forget to sync? use push -f to force)\n")
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status("no changes found\n")
            return 1
        elif not force:
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn("abort: push creates new remote branches!\n")
                self.ui.status("(did you forget to merge?" +
                               " use push -f to force)\n")
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
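    # Editor's note: pull and push are symmetric - discovery picks the root
    # set, one side builds a changegroup from those roots, and the other
    # side applies it with addchangegroup().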
-    def changegroup(self, basenodes):
-        genread = util.chunkbuffer
-
-        def gengroup():
-            nodes = self.changelog.nodesbetween(basenodes)[0]
-
-            # construct the link map
-            linkmap = {}
-            for n in nodes:
-                linkmap[self.changelog.rev(n)] = n
-
-            # construct a list of all changed files
-            changed = {}
-            for n in nodes:
-                c = self.changelog.read(n)
-                for f in c[3]:
-                    changed[f] = 1
-            changed = changed.keys()
-            changed.sort()
-
-            # the changegroup is changesets + manifests + all file revs
-            revs = [ self.changelog.rev(n) for n in nodes ]
-
-            for y in self.changelog.group(linkmap): yield y
-            for y in self.manifest.group(linkmap): yield y
-            for f in changed:
-                yield struct.pack(">l", len(f) + 4) + f
-                g = self.file(f).group(linkmap)
-                for y in g:
-                    yield y
-
-            yield struct.pack(">l", 0)
-
-        return genread(gengroup())
+    def changegroupsubset(self, bases, heads):
+        cl = self.changelog
+        # msng = missing
+        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
+        junk = None
+        knownheads = {}
+        for n in bases:
+            for p in cl.parents(n):
+                if p != nullid:
+                    knownheads[p] = 1
+        knownheads = knownheads.keys()
+        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
+        has_cl_set = dict.fromkeys(has_cl_set)
+
+        mnfst = self.manifest
+        msng_mnfst_set = {}
+        msng_filenode_set = {}
+
+        def identity(x):
+            return x
+
+        def cmp_by_rev_func(revlog):
+            def cmpfunc(a, b):
+                return cmp(revlog.rev(a), revlog.rev(b))
+            return cmpfunc
+
+        def prune_parents(revlog, hasset, msngset):
+            haslst = hasset.keys()
+            haslst.sort(cmp_by_rev_func(revlog))
+            for node in haslst:
+                parentlst = [p for p in revlog.parents(node) if p != nullid]
+                while parentlst:
+                    n = parentlst.pop()
+                    if n not in hasset:
+                        hasset[n] = 1
+                        p = [p for p in revlog.parents(n) if p != nullid]
+                        parentlst.extend(p)
+            for n in hasset:
+                msngset.pop(n, None)
+
+        def manifest_and_file_collector(changedfileset):
+            def collect_manifests_and_files(clnode):
+                c = cl.read(clnode)
+                for f in c[3]:
+                    # This is to make sure we only have one instance of each
+                    # filename string for each filename.
+                    changedfileset.setdefault(f, f)
+                msng_mnfst_set.setdefault(c[0], clnode)
+            return collect_manifests_and_files
+
+        def prune_manifests():
+            has_mnfst_set = {}
+            for n in msng_mnfst_set:
+                linknode = cl.node(mnfst.linkrev(n))
+                if linknode in has_cl_set:
+                    has_mnfst_set[n] = 1
+            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
+
+        def lookup_manifest_link(mnfstnode):
+            return msng_mnfst_set[mnfstnode]
+
+        def filenode_collector(changedfiles):
+            def collect_msng_filenodes(mnfstnode):
+                m = mnfst.read(mnfstnode)
+                for f in changedfiles:
+                    fnode = m.get(f, None)
+                    if fnode is not None:
+                        clnode = msng_mnfst_set[mnfstnode]
+                        ndset = msng_filenode_set.setdefault(f, {})
+                        ndset.setdefault(fnode, clnode)
+            return collect_msng_filenodes
+
+        def prune_filenodes(f, filerevlog):
+            msngset = msng_filenode_set[f]
+            hasset = {}
+            for n in msngset:
+                clnode = cl.node(filerevlog.linkrev(n))
+                if clnode in has_cl_set:
+                    hasset[n] = 1
+            prune_parents(filerevlog, hasset, msngset)
+
+        def lookup_filenode_link_func(fname):
+            msngset = msng_filenode_set[fname]
+            def lookup_filenode_link(fnode):
+                return msngset[fnode]
+            return lookup_filenode_link
+
+        def gengroup():
+            changedfiles = {}
+            group = cl.group(msng_cl_lst, identity,
+                             manifest_and_file_collector(changedfiles))
+            for chnk in group:
+                yield chnk
+            prune_manifests()
+            msng_mnfst_lst = msng_mnfst_set.keys()
+            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
+            changedfiles = changedfiles.keys()
+            changedfiles.sort()
+            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
+                                filenode_collector(changedfiles))
+            for chnk in group:
+                yield chnk
+            msng_mnfst_lst = None
+            msng_mnfst_set.clear()
+            for fname in changedfiles:
+                filerevlog = self.file(fname)
+                prune_filenodes(fname, filerevlog)
+                msng_filenode_lst = msng_filenode_set[fname].keys()
+                if len(msng_filenode_lst) > 0:
+                    yield struct.pack(">l", len(fname) + 4) + fname
+                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
+                    group = filerevlog.group(msng_filenode_lst,
+                                             lookup_filenode_link_func(fname))
+                    for chnk in group:
+                        yield chnk
+                del msng_filenode_set[fname]
+            yield struct.pack(">l", 0)
+
+        return util.chunkbuffer(gengroup())
+
+    def changegroup(self, basenodes):
+        cl = self.changelog
+        nodes = cl.nodesbetween(basenodes, None)[0]
+        revset = dict.fromkeys([cl.rev(n) for n in nodes])
+
+        def identity(x):
+            return x
+
+        def gennodelst(revlog):
+            for r in xrange(0, revlog.count()):
+                n = revlog.node(r)
+                if revlog.linkrev(n) in revset:
+                    yield n
+
+        def changed_file_collector(changedfileset):
+            def collect_changed_files(clnode):
+                c = cl.read(clnode)
+                for fname in c[3]:
+                    changedfileset[fname] = 1
+            return collect_changed_files
+
+        def lookuprevlink_func(revlog):
+            def lookuprevlink(n):
+                return cl.node(revlog.linkrev(n))
+            return lookuprevlink
+
+        def gengroup():
+            # construct a list of all changed files
+            changedfiles = {}
+
+            for chnk in cl.group(nodes, identity,
+                                 changed_file_collector(changedfiles)):
+                yield chnk
+            changedfiles = changedfiles.keys()
+            changedfiles.sort()
+
+            mnfst = self.manifest
+            def nodegen(revlog, reviter):
+                for r in reviter:
+                    yield revlog.node(r)
+            nodeiter = gennodelst(mnfst)
+            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
+                yield chnk
+
+            for fname in changedfiles:
+                filerevlog = self.file(fname)
+                nodeiter = gennodelst(filerevlog)
+                nodeiter = list(nodeiter)
+                if nodeiter:
+                    yield struct.pack(">l", len(fname) + 4) + fname
+                    lookup = lookuprevlink_func(filerevlog)
+                    for chnk in filerevlog.group(nodeiter, lookup):
+                        yield chnk
+
+            yield struct.pack(">l", 0)
+
+        return util.chunkbuffer(gengroup())

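    # Editor's note on the rewritten generators above: both emit three
    # sections - changelog chunks, manifest chunks, then for each changed
    # file a length-prefixed filename followed by its filelog chunks - with
    # a zero-length chunk terminating each group, which is exactly the
    # stream addchangegroup() below consumes.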

    def addchangegroup(self, source):

        def getchunk():
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError("premature EOF reading chunk" +
                                     " (got %d bytes, expected %d)"
                                     % (len(d), l - 4))
            return d
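        # Illustrative example (editor's note): each chunk is framed by a
        # 4-byte big-endian length that includes the length field itself, so
        # a 12-byte payload arrives as struct.pack(">l", 16) + payload and a
        # bare struct.pack(">l", 0) marks the end of a group.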
943
1085
944 def getgroup():
1086 def getgroup():
945 while 1:
1087 while 1:
946 c = getchunk()
1088 c = getchunk()
947 if not c: break
1089 if not c: break
948 yield c
1090 yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = " (+%d heads)" % (newheads - oldheads)

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if changesets > 0:
            if not self.hook("changegroup",
                             node=hex(self.changelog.node(cor+1))):
                self.ui.warn("abort: changegroup hook returned failure!\n")
                return 1

            for i in range(cor + 1, cnr + 1):
                self.hook("commit", node=hex(self.changelog.node(i)))

        return
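
# For reference, the stream consumed above is: one chunk group for the
# changelog, one for the manifest, then repeated pairs of a file-name
# chunk and that file's revision group, with an empty chunk closing each
# group and the stream. A hedged sketch of a scanner over that layout
# (readchunks/scanchangegroup are illustrative helpers, not hg API):

import struct

def readchunks(read):
    # yield chunk payloads until a terminating length <= 4
    while 1:
        d = read(4)
        if not d: break
        l = struct.unpack(">l", d)[0]
        if l <= 4: break
        yield read(l - 4)

def scanchangegroup(read):
    for c in readchunks(read): pass            # changeset group
    for c in readchunks(read): pass            # manifest group
    while 1:
        d = read(4)
        if not d: break
        l = struct.unpack(">l", d)[0]
        if l <= 4: break                       # empty chunk: end of stream
        name = read(l - 4)                     # one chunk: the file name
        for c in readchunks(read): pass        # that file's revision group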

    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug("remote deleted %s, clobbering\n" % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local modified %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma
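
# The mode computation used twice above is a bitwise "whoever changed it
# wins" rule: ((a^b) | (a^c)) ^ a flips the ancestor bit a exactly when
# the working copy b or the remote c differs from it. A quick check:

def mergemode(a, b, c):
    # a = ancestor's exec bit, b = working dir's, c = remote's
    return ((a ^ b) | (a ^ c)) ^ a

assert mergemode(0, 1, 0) == 1   # we set the bit, the merge keeps it
assert mergemode(1, 1, 0) == 0   # they cleared it, the merge clears it
assert mergemode(0, 0, 0) == 0   # nobody changed it, nothing changes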

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError, e:
                if e.errno != errno.ENOENT:
                    raise
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
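
# A note on the st_mtime=-1 stamps used above: an entry whose recorded
# size or mtime can never match the on-disk file forces the next status
# run to compare file contents instead of trusting stat data, which is
# how a fresh merge avoids looking clean. A toy restatement of that
# decision (the (size, mtime) entry shape is an assumption, not the
# real dirstate format):

def needs_content_check(entry, st_size, st_mtime):
    size, mtime = entry
    return size == -1 or mtime == -1 or (size, mtime) != (st_size, st_mtime)

assert needs_content_check((-1, -1), 120, 99)        # just merged: recheck
assert not needs_content_check((120, 99), 120, 99)   # clean: trust stat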

    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: my %s other %s ancestor %s\n" %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        os.unlink(b)
        os.unlink(c)
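
# The merge tool lookup above tries $HGMERGE, then ui.merge from the
# config, then a program named hgmerge on $PATH. Whatever is chosen is
# run as
#
#   cmd <local-file> <base-tempfile> <other-tempfile>
#
# and is expected to leave the result in the local file, exiting
# nonzero on failure. Pointing it at a different tool is just an
# environment change (the wrapper name below is hypothetical):

import os
os.environ["HGMERGE"] = "kdiff3-wrapper"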

    def verify(self):
        filelinkrevs = {}
        filenodes = {}
        changesets = revisions = files = 0
        errors = [0]
        neededmanifests = {}

        def err(msg):
            self.ui.warn(msg + "\n")
            errors[0] += 1

        seen = {}
        self.ui.status("checking changesets\n")
        for i in range(self.changelog.count()):
            changesets += 1
            n = self.changelog.node(i)
            l = self.changelog.linkrev(n)
            if l != i:
                err("incorrect link (%d) for changeset revision %d" % (l, i))
            if n in seen:
                err("duplicate changeset at revision %d" % i)
            seen[n] = 1

            for p in self.changelog.parents(n):
                if p not in self.changelog.nodemap:
                    err("changeset %s has unknown parent %s" %
                        (short(n), short(p)))
            try:
                changes = self.changelog.read(n)
            except Exception, inst:
                err("unpacking changeset %s: %s" % (short(n), inst))

            neededmanifests[changes[0]] = n

            for f in changes[3]:
                filelinkrevs.setdefault(f, []).append(i)

        seen = {}
        self.ui.status("checking manifests\n")
        for i in range(self.manifest.count()):
            n = self.manifest.node(i)
            l = self.manifest.linkrev(n)

            if l < 0 or l >= self.changelog.count():
                err("bad manifest link (%d) at revision %d" % (l, i))

            if n in neededmanifests:
                del neededmanifests[n]

            if n in seen:
                err("duplicate manifest at revision %d" % i)

            seen[n] = 1

            for p in self.manifest.parents(n):
                if p not in self.manifest.nodemap:
                    err("manifest %s has unknown parent %s" %
                        (short(n), short(p)))

            try:
                delta = mdiff.patchtext(self.manifest.delta(n))
            except KeyboardInterrupt:
                self.ui.warn("interrupted")
                raise
            except Exception, inst:
                err("unpacking manifest %s: %s" % (short(n), inst))

            ff = [ l.split('\0') for l in delta.splitlines() ]
            for f, fn in ff:
                filenodes.setdefault(f, {})[bin(fn[:40])] = 1

        self.ui.status("crosschecking files in changesets and manifests\n")

        for m, c in neededmanifests.items():
            err("Changeset %s refers to unknown manifest %s" %
                (short(c), short(m)))
        del neededmanifests

        for f in filenodes:
            if f not in filelinkrevs:
                err("file %s in manifest but not in changesets" % f)

        for f in filelinkrevs:
            if f not in filenodes:
                err("file %s in changeset but not in manifest" % f)

        self.ui.status("checking files\n")
        ff = filenodes.keys()
        ff.sort()
        for f in ff:
            if f == "/dev/null": continue
            files += 1
            fl = self.file(f)
            nodes = { nullid: 1 }
            seen = {}
            for i in range(fl.count()):
                revisions += 1
                n = fl.node(i)

                if n in seen:
                    err("%s: duplicate revision %d" % (f, i))
                if n not in filenodes[f]:
                    err("%s: %d:%s not in manifests" % (f, i, short(n)))
                else:
                    del filenodes[f][n]

                flr = fl.linkrev(n)
                if flr not in filelinkrevs[f]:
                    err("%s:%s points to unexpected changeset %d"
                        % (f, short(n), flr))
                else:
                    filelinkrevs[f].remove(flr)

                # verify contents
                try:
                    t = fl.read(n)
                except Exception, inst:
                    err("unpacking file %s %s: %s" % (f, short(n), inst))

                # verify parents
                (p1, p2) = fl.parents(n)
                if p1 not in nodes:
                    err("file %s:%s unknown parent 1 %s" %
                        (f, short(n), short(p1)))
                if p2 not in nodes:
                    err("file %s:%s unknown parent 2 %s" %
                        (f, short(n), short(p2)))
                nodes[n] = 1

            # cross-check
            for node in filenodes[f]:
                err("node %s in manifests not in %s" % (hex(node), f))

        self.ui.status("%d files, %d changesets, %d total revisions\n" %
                       (files, changesets, revisions))

        if errors[0]:
            self.ui.warn("%d integrity errors encountered!\n" % errors[0])
            return 1
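
# What the file pass above enforces, reduced to its two invariants:
# every filelog node must appear in some manifest, and its linkrev must
# be a changeset that names the file. A toy restatement (data shapes
# illustrative, not hg's):

filelinkrevs = {"a.txt": [0, 2]}               # changesets touching a.txt
manifestnodes = {"a.txt": {"n1": 1, "n2": 1}}  # nodes manifests list for it
filelog = [("n1", 0), ("n2", 2)]               # (node, linkrev) per filerev
for node, lr in filelog:
    assert node in manifestnodes["a.txt"], "node not in any manifest"
    assert lr in filelinkrevs["a.txt"], "points to unexpected changeset"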
@@ -1,817 +1,814 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from demandload import demandload
14 from demandload import demandload
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
16
16
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
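
# Sorting the parents first makes the id independent of parent order, so
# a merge hashed from either side gets the same node. A quick check of
# that property, restating the function with toy 20-byte ids:

import sha

def toyhash(text, p1, p2):
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

pa, pb = "a" * 20, "b" * 20
assert toyhash("data", pa, pb) == toyhash("data", pb, pa)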

def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return text
    if len(text) < 44:
        if text[0] == '\0': return text
        return 'u' + text
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return text
        return 'u' + text
    return bin

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError("unknown compression type %s" % t)
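
# Round trip of the tagging scheme above ('u' marks text stored
# verbatim, 'x' is zlib's own first byte, '\0' passes through), assuming
# the two functions above are in scope:

for t in ["", "short text", "x" * 1000]:
    assert decompress(compress(t)) == t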

indexformat = ">4l20s20s20s"
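
# The index record: four big-endian longs plus three 20-byte binary
# hashes, 76 bytes per entry with no padding (a quick check):
#
#   import struct
#   assert struct.calcsize(">4l20s20s20s") == 76
#
# fields: data offset, compressed length, chain base rev, linkrev,
#         p1 node, p2 node, this revision's node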

class lazyparser:
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

class lazyindex:
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def append(self, e):
        self.p.index.append(e)

class lazymap:
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
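
# The laziness above amounts to: parse fixed-size records in blocks of
# 1000, and only when an index position is first touched. The same idea
# in a toy (one 4-byte int per record; names illustrative):

import struct

RECFMT = ">l"

class toylazy:
    def __init__(self, data):
        self.s = struct.calcsize(RECFMT)
        self.data = data
        self.cache = [None] * (len(data) / self.s)
    def __getitem__(self, pos):
        if self.cache[pos] is None:
            base = (pos / 1000) * 1000
            for i in xrange(base, min(len(self.cache), base + 1000)):
                self.cache[i] = struct.unpack(
                    RECFMT, self.data[i * self.s:(i + 1) * self.s])[0]
        return self.cache[pos]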

class RevlogError(Exception): pass

class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError('%s: no node %s' % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable
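
# reachable is a plain breadth-first walk toward the roots; the same
# shape on a toy parents map (names illustrative):

parents = {"d": ("b", None), "c": ("b", None),
           "b": ("a", None), "a": (None, None)}
seen, visit = {"d": 1}, ["d"]
while visit:
    n = visit.pop(0)
    for p in parents[n]:
        if p is not None and p not in seen:
            seen[p] = 1
            visit.append(p)
assert sorted(seen) == ["a", "b", "d"]   # "c" is not an ancestor of "d"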

    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
          1. All nodes must be descended from a node in roots (the nodes on
             roots are considered descended from themselves).
          2. All nodes must also be ancestors of a node in heads (the nodes in
             heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        if roots is not None:
            roots = list(roots)
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = list(heads)
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > -1:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots? Return empty list
                return ([], [], [])
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = -1
            roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
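
# The whole routine above reduces to: keep the nodes that are both
# descendents of a root and ancestors of a head, in topological order.
# A toy version on a four-node chain (names and shapes illustrative):

null = None
parents = {"a": (null, null), "b": ("a", null),
           "c": ("b", null), "d": ("c", null)}
topo = ["a", "b", "c", "d"]

def between(roots, heads):
    anc, stack = {}, list(heads)       # mark ancestors of the heads
    while stack:
        n = stack.pop()
        if n is not null and n not in anc:
            anc[n] = 1
            stack.extend(parents[n])
    desc = dict.fromkeys(roots, 1)     # walk forward from the roots
    out = []
    for n in topo:
        p = parents[n]
        if (n in desc or p[0] in desc or p[1] in desc) and n in anc:
            desc[n] = 1
            out.append(n)
    return out

assert between(["b"], ["d"]) == ["b", "c", "d"]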

    def heads(self, stop=None):
        """return the list of all nodes that have no children"""
        p = {}
        h = []
        stoprev = 0
        if stop and stop in self.nodemap:
            stoprev = self.rev(stop)

        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            if n == stop:
                break
            if r < stoprev:
                break
            for pn in self.parents(n):
                p[pn] = 1
        return h

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

        return None
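
# Both addressing styles lookup accepts, assuming a populated revlog r
# (values illustrative):
#
#   r.lookup("0")     -> node of the first revision
#   r.lookup("-1")    -> tip; negative numbers count back from the end
#   r.lookup("1e3a")  -> the unique node whose hex id starts with "1e3a";
#                        raises KeyError if ambiguous or unmatched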

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise RevlogError("integrity check failed on %s:%d"
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
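
# Reading a revision is thus one full text plus a single pass of
# patches, applied in chain order. Schematically (patch stands in for
# mdiff.patches; shapes illustrative):

def readchain(fulltext, deltas, patch):
    # fulltext: the snapshot at the chain base; deltas: every revision
    # from base+1 up to the wanted rev, in order
    text = fulltext
    for d in deltas:
        text = patch(text, [d])
    return text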

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node
564
564
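    # The dist > len(text) * 2 cutoff bounds read cost: dist is the number of
    # bytes that would have to be read back to rebuild this revision (the
    # whole chain from its base, plus the new compressed delta).  Illustrative
    # numbers only: for a 100000-byte text whose chain already spans 250000
    # bytes, 250000 > 200000, so a fresh full version is stored and the delta
    # chain restarts at this revision.
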
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

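    # The two generators above yield ancestors grouped into "generations",
    # bands of equal distance from root, in decreasing order.  Walking both
    # sides one band at a time lets the search stop at the deepest band where
    # the ancestor sets first intersect; any node in that intersection is a
    # common ancestor of maximal generation, so returning i[0] is acceptable
    # even when the intersection holds several candidates.
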
-    def group(self, linkmap):
+    def group(self, nodelist, lookup, infocollect = None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent is parent[0].
        """
-        revs = []
-        needed = {}
-
-        # find file nodes/revs that match changeset revs
-        for i in xrange(0, self.count()):
-            if self.index[i][3] in linkmap:
-                revs.append(i)
-                needed[i] = 1
+        revs = [self.rev(n) for n in nodelist]
+        needed = dict.fromkeys(revs, 1)

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

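    # Span coalescing: because needed revs are sorted and visited in on-disk
    # order, a chunk that starts exactly where the previous span ends
    # (oo + ol == o) is folded into that span.  Illustrative numbers only:
    # chunks at (offset 0, len 10) and (offset 10, len 20) merge into one
    # span (0, 30, [(r1, 10), (r2, 20)]), so the datafile is read once
    # instead of twice.
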
        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

+            if infocollect is not None:
+                infocollect(n)
+
            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
-            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
+            meta = n + p[0] + p[1] + lookup(n)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

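    # Wire format: each yielded chunk is a 4-byte big-endian length (counting
    # itself), 80 bytes of metadata (node, p1, p2, and the 20-byte changeset
    # node returned by lookup), then the delta; a zero length word closes the
    # group.  A minimal reader sketch, assuming a file-like object `f`
    # (hypothetical name), mirroring what addgroup() below consumes:
    #
    #   while True:
    #       l = struct.unpack(">l", f.read(4))[0]
    #       if l <= 4:
    #           break                    # empty chunk terminates the group
    #       chunk = f.read(l - 4)
    #       node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    #       delta = chunk[80:]
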
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

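    # transaction.add() records the current length of each file, so an
    # aborted pull can roll back by truncating the datafile and indexfile to
    # these offsets; everything appended below is covered by that journal.
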
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError("already have %s" % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError("unknown base %s" % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError("consistency error adding group")
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
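
    # addgroup() is the consumer of the stream group() produces: the first
    # delta must apply against a parent already present in this log, and each
    # later delta applies against the node just before it in the stream.  A
    # delta that extends the tip's chain (chain == prev) and keeps the chain
    # short is appended raw; otherwise the full text is rebuilt and re-added
    # via addrevision(), which re-derives a suitable delta locally.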