Implement data inlined with the index file...
mason@suse.com
r2073:1e6745f7 default
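This commit teaches revlogs to store their data inline with the index file; the part visible in localrepo.py is in localrepository.__init__, where the revlog format version from .hgrc and its named feature flags are now tracked separately (the changed hunk is marked with -/+ below). New revlogs are opened with the configured version plus flags, while an existing changelog keeps its own on-disk version unless it matches the configured base format, in which case the configured flags are applied on top. Here is a minimal, standalone sketch of that decision; FLAGS and combine_version() are hypothetical stand-ins for revlog.flagstr() and the new __init__ logic, and the inline flag's bit value is assumed for illustration:

# Editor's sketch, not part of the commit.
FLAGS = {"inline": 1 << 16}  # assumed flag bit, for illustration only

def combine_version(revlogopts, changelog_version):
    # base format requested in .hgrc, e.g. {'format': '0', 'flags': 'inline'}
    base = int(revlogopts.get('format', 0))
    flags = 0
    for name in revlogopts.get('flags', "").split():
        flags |= FLAGS[name]
    # an existing changelog keeps its own version unless it matches the
    # configured base format, in which case the flags are applied on top
    v = changelog_version
    if v == base:
        v |= flags
    return v

# a repo whose changelog matches the configured format gains the flag
assert combine_version({'format': '0', 'flags': 'inline'}, 0) == 1 << 16
# a repo written with a different version keeps it unchanged
assert combine_version({'format': '1', 'flags': 'inline'}, 0) == 0

Since self.file() passes self.revlogversion to filelog, newly created filelogs pick up the combined version as well.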
@@ -1,1963 +1,1974 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import os, util
import filelog, manifest, changelog, dirstate, repo
from node import *
from i18n import gettext as _
from demandload import *
demandload(globals(), "appendfile changegroup")
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")

class localrepository(object):
    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', 0))
+       flags = 0
        for x in v.get('flags', "").split():
-           self.revlogversion |= revlog.flagstr(x)
+           flags |= revlog.flagstr(x)

-       self.manifest = manifest.manifest(self.opener, self.revlogversion)
-       self.changelog = changelog.changelog(self.opener, self.revlogversion)
-       self.revlogversion = self.changelog.version
+       v = self.revlogversion | flags
+       self.manifest = manifest.manifest(self.opener, v)
+       self.changelog = changelog.changelog(self.opener, v)
+
+       # the changelog might not have the inline index flag
+       # on. If the format of the changelog is the same as found in
+       # .hgrc, apply any flags found in the .hgrc as well.
+       # Otherwise, just use the version from the changelog.
+       v = self.changelog.version
+       if v == self.revlogversion:
+           v |= flags
+       self.revlogversion = v

        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
    def hook(self, name, throw=False, **args):
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            r = runhook(hname, cmd) and r
        return r

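    # Editor's note, added in editing (not part of the commit): tags()
    # below reads each head of the .hgtags filelog from oldest to newest,
    # so entries on newer heads take precedence; "localtags" entries then
    # override .hgtags, and the 'tip' tag is always set last.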
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = line.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                node, key = s
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: ignoring invalid tag\n") % context)
                    return
                self.tagscache[key.strip()] = bin_n

            # read each head of the tags file, ending with the tip
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            fl = self.file(".hgtags")
            h = fl.heads()
            h.reverse()
            for r in h:
                count = 0
                for l in fl.read(r).splitlines():
                    count += 1
                    parsetag(l, ".hgtags:%d" % count)

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    count += 1
                    parsetag(l, "localtags:%d" % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def lookup(self, key):
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.stat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def undo(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)

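    # Editor's note, added in editing (not part of the commit): the
    # checkfilemerge() convention above is (node, None, None) when the
    # existing filenode can be reused unchanged, and (None, fp1, fp2)
    # when a new filenode must be added with those two parents.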
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

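    # Editor's note, added in editing (not part of the commit): walk()
    # above tags each result with a source code: 'm' for files found in
    # the manifest of the given node, 'b' for explicitly requested files
    # that matched badmatch, plus whatever codes dirstate.walk() yields
    # in the working-directory case.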
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

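    # Editor's note, added in editing (not part of the commit): between()
    # above samples each top->bottom range at exponentially growing
    # distances from the top (1, 2, 4, 8, ...). findincoming() below
    # relies on this to binary-search a partially-known remote branch in
    # O(log n) round trips rather than walking it one changeset at a time.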
    def findincoming(self, remote, base=None, heads=None, force=False):
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

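    # Editor's note, added in editing (not part of the commit):
    # findincoming() above runs the discovery protocol in three phases:
    # compare heads, walk unknown remote branches in batches of ten, then
    # binary-search each partially-known branch via between(). findoutgoing()
    # below reuses its 'base' result to prune the local DAG down to the
    # roots the remote side is missing.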
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        l = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote, force=force)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)

1007 def push(self, remote, force=False, revs=None):
1018 def push(self, remote, force=False, revs=None):
1008 lock = remote.lock()
1019 lock = remote.lock()
1009
1020
1010 base = {}
1021 base = {}
1011 remote_heads = remote.heads()
1022 remote_heads = remote.heads()
1012 inc = self.findincoming(remote, base, remote_heads, force=force)
1023 inc = self.findincoming(remote, base, remote_heads, force=force)
1013 if not force and inc:
1024 if not force and inc:
1014 self.ui.warn(_("abort: unsynced remote changes!\n"))
1025 self.ui.warn(_("abort: unsynced remote changes!\n"))
1015 self.ui.status(_("(did you forget to sync?"
1026 self.ui.status(_("(did you forget to sync?"
1016 " use push -f to force)\n"))
1027 " use push -f to force)\n"))
1017 return 1
1028 return 1
1018
1029
1019 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1030 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1020 if revs is not None:
1031 if revs is not None:
1021 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1032 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1022 else:
1033 else:
1023 bases, heads = update, self.changelog.heads()
1034 bases, heads = update, self.changelog.heads()
1024
1035
1025 if not bases:
1036 if not bases:
1026 self.ui.status(_("no changes found\n"))
1037 self.ui.status(_("no changes found\n"))
1027 return 1
1038 return 1
1028 elif not force:
1039 elif not force:
1029 if revs is not None:
1040 if revs is not None:
1030 updated_heads = {}
1041 updated_heads = {}
1031 for base in msng_cl:
1042 for base in msng_cl:
1032 for parent in self.changelog.parents(base):
1043 for parent in self.changelog.parents(base):
1033 if parent in remote_heads:
1044 if parent in remote_heads:
1034 updated_heads[parent] = True
1045 updated_heads[parent] = True
1035 updated_heads = updated_heads.keys()
1046 updated_heads = updated_heads.keys()
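            # Heuristic for detecting new remote branches: every head we are
            # about to push should graft onto an existing remote head, so
            # fewer updated remote heads than outgoing heads means at least
            # one pushed head starts a new remote branch.  For example, if
            # the remote has the single head A and we push two heads B and C
            # that both descend from A, updated_heads is [A] (length 1)
            # while heads has length 2, and the push is refused unless
            # forced.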
            if len(updated_heads) < len(heads):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex, as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""
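        # The stream produced by gengroup below has three sections: the
        # changelog group for the missing changesets, the manifest group for
        # the corresponding manifest nodes, and then, for each changed file,
        # a chunk carrying the filename followed by the group of missing
        # filenodes for that file; an empty (close) chunk terminates the
        # stream.  addchangegroup consumes the sections in exactly this
        # order.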

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents
            # the most efficient order to read the nodes in, and represents
            # a topological sorting of the nodes, this function is often
            # useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we
        # can also assume the recipient will have all the parents.  This
        # function prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an
        # environment for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.
            # It does this by assuming that a filenode belongs to the
            # changenode the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our
            # functions back to lookup the owning changenode and collect
            # information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory
            # for them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

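    # Note: changegroupsubset above and changegroup below both wrap their
    # chunk stream in util.chunkbuffer, which presents the generator as a
    # file-like object with a read() method; that is the shape
    # addchangegroup expects when it pulls the sections back off with
    # changegroup.getchunk and changegroup.chunkiter.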
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see an inconsistent view
        cl = appendfile.appendchangelog(self.opener)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see a
        # consistent view of the repo
        mf.writedata()
        cl.writedata()
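        # (Readers discover new revisions through the changelog, so flushing
        # the manifest data first should guarantee that no changeset becomes
        # visible before the manifest revision it links to exists on disk.)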

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener)
        self.manifest = manifest.manifest(self.opener)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

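        # Per the docstring: 0 was returned above for an empty source, 1
        # means success with no new heads (oldheads == newheads), and any
        # larger value reports newly added heads (e.g. oldheads=1,
        # newheads=2 yields 2).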
        return newheads - oldheads + 1

    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and does it match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are the files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merge later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past.  Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        stat = ((len(get), _("updated")),
                (len(merge) - len(failedmerge), _("merged")),
                (len(remove), _("removed")),
                (len(failedmerge), _("unresolved")))
        note = ", ".join([_("%d files %s") % s for s in stat])
        self.ui.note("%s\n" % note)
        if moddirstate and branch_merge:
            self.ui.note(_("(branch merge, don't forget to commit)\n"))

        return err

    def merge3(self, fn, my, other, p1, p2):
        """perform a 3-way merge in the working directory"""

        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
                        environ={'HG_FILE': fn,
                                 'HG_MY_NODE': p1,
                                 'HG_OTHER_NODE': p2,
                                 'HG_FILE_MY_NODE': hex(my),
                                 'HG_FILE_OTHER_NODE': hex(other),
                                 'HG_FILE_BASE_NODE': hex(base)})
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        os.unlink(b)
        os.unlink(c)
        return r

    def verify(self):
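        # Verification runs in four passes, matching the status messages
        # below: check every changelog entry, check every manifest entry,
        # crosscheck that changesets and manifests agree on the set of
        # files, then walk each filelog verifying sizes, linkrevs, contents
        # and parents.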
        filelinkrevs = {}
        filenodes = {}
        changesets = revisions = files = 0
        errors = [0]
        neededmanifests = {}

        def err(msg):
            self.ui.warn(msg + "\n")
            errors[0] += 1

        def checksize(obj, name):
            d = obj.checksize()
            if d[0]:
                err(_("%s data length off by %d bytes") % (name, d[0]))
            if d[1]:
                err(_("%s index contains %d extra bytes") % (name, d[1]))

        seen = {}
        self.ui.status(_("checking changesets\n"))
        checksize(self.changelog, "changelog")

        for i in range(self.changelog.count()):
            changesets += 1
            n = self.changelog.node(i)
            l = self.changelog.linkrev(n)
            if l != i:
                err(_("incorrect link (%d) for changeset revision %d") % (l, i))
            if n in seen:
                err(_("duplicate changeset at revision %d") % i)
            seen[n] = 1

            for p in self.changelog.parents(n):
                if p not in self.changelog.nodemap:
                    err(_("changeset %s has unknown parent %s") %
                        (short(n), short(p)))
            try:
                changes = self.changelog.read(n)
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking changeset %s: %s") % (short(n), inst))
                continue

            neededmanifests[changes[0]] = n

            for f in changes[3]:
                filelinkrevs.setdefault(f, []).append(i)

        seen = {}
        self.ui.status(_("checking manifests\n"))
        checksize(self.manifest, "manifest")

        for i in range(self.manifest.count()):
            n = self.manifest.node(i)
            l = self.manifest.linkrev(n)

            if l < 0 or l >= self.changelog.count():
                err(_("bad manifest link (%d) at revision %d") % (l, i))

            if n in neededmanifests:
                del neededmanifests[n]

            if n in seen:
                err(_("duplicate manifest at revision %d") % i)

            seen[n] = 1

            for p in self.manifest.parents(n):
                if p not in self.manifest.nodemap:
                    err(_("manifest %s has unknown parent %s") %
                        (short(n), short(p)))

            try:
                delta = mdiff.patchtext(self.manifest.delta(n))
            except KeyboardInterrupt:
                self.ui.warn(_("interrupted"))
                raise
            except Exception, inst:
                err(_("unpacking manifest %s: %s") % (short(n), inst))
                continue

            try:
                ff = [ l.split('\0') for l in delta.splitlines() ]
                for f, fn in ff:
                    filenodes.setdefault(f, {})[bin(fn[:40])] = 1
            except (ValueError, TypeError), inst:
                err(_("broken delta in manifest %s: %s") % (short(n), inst))

        self.ui.status(_("crosschecking files in changesets and manifests\n"))

        for m, c in neededmanifests.items():
            err(_("Changeset %s refers to unknown manifest %s") %
                (short(m), short(c)))
        del neededmanifests

        for f in filenodes:
            if f not in filelinkrevs:
                err(_("file %s in manifest but not in changesets") % f)

        for f in filelinkrevs:
            if f not in filenodes:
                err(_("file %s in changeset but not in manifest") % f)

        self.ui.status(_("checking files\n"))
        ff = filenodes.keys()
        ff.sort()
        for f in ff:
            if f == "/dev/null":
                continue
            files += 1
            if not f:
                err(_("file without name in manifest %s") % short(n))
                continue
            fl = self.file(f)
            checksize(fl, f)

            nodes = {nullid: 1}
            seen = {}
            for i in range(fl.count()):
                revisions += 1
                n = fl.node(i)

                if n in seen:
                    err(_("%s: duplicate revision %d") % (f, i))
                if n not in filenodes[f]:
                    err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
                else:
                    del filenodes[f][n]

                flr = fl.linkrev(n)
                if flr not in filelinkrevs.get(f, []):
                    err(_("%s:%s points to unexpected changeset %d")
                        % (f, short(n), flr))
                else:
                    filelinkrevs[f].remove(flr)

                # verify contents
                try:
                    t = fl.read(n)
                except KeyboardInterrupt:
                    self.ui.warn(_("interrupted"))
                    raise
                except Exception, inst:
                    err(_("unpacking file %s %s: %s") % (f, short(n), inst))

                # verify parents
                (p1, p2) = fl.parents(n)
                if p1 not in nodes:
                    err(_("file %s:%s unknown parent 1 %s") %
                        (f, short(n), short(p1)))
                if p2 not in nodes:
1950 if p2 not in nodes:
1940 err(_("file %s:%s unknown parent 2 %s") %
1951 err(_("file %s:%s unknown parent 2 %s") %
1941 (f, short(n), short(p2)))
1952 (f, short(n), short(p2)))
1942 nodes[n] = 1
1953 nodes[n] = 1
1943
1954
1944 # cross-check
1955 # cross-check
1945 for node in filenodes[f]:
1956 for node in filenodes[f]:
1946 err(_("node %s in manifests not in %s") % (hex(node), f))
1957 err(_("node %s in manifests not in %s") % (hex(node), f))
1947
1958
1948 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1959 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1949 (files, changesets, revisions))
1960 (files, changesets, revisions))
1950
1961
1951 if errors[0]:
1962 if errors[0]:
1952 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1963 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1953 return 1
1964 return 1
1954
1965
1955 # used to avoid circular references so destructors work
1966 # used to avoid circular references so destructors work
1956 def aftertrans(base):
1967 def aftertrans(base):
1957 p = base
1968 p = base
1958 def a():
1969 def a():
1959 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1970 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1960 util.rename(os.path.join(p, "journal.dirstate"),
1971 util.rename(os.path.join(p, "journal.dirstate"),
1961 os.path.join(p, "undo.dirstate"))
1972 os.path.join(p, "undo.dirstate"))
1962 return a
1973 return a
1963
1974
@@ -1,962 +1,1062 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 demandload(globals(), "sha struct zlib")
17 demandload(globals(), "sha struct zlib")
18
18
19 # revlog version strings
19 # revlog version strings
20 REVLOGV0 = 0
20 REVLOGV0 = 0
21 REVLOGNG = 1
21 REVLOGNG = 1
22
22
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
25
26 def flagstr(flag):
27 if flag == "inline":
28 return REVLOGNGINLINEDATA
29 raise RevlogError(_("unknown revlog flag %s" % flag))
30
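A quick standalone sketch of the packing convention behind this flag: the on-disk version word keeps the index format in its low 16 bits and feature flags such as inline data in the high bits (plain Python, restating the constants above):

    REVLOGV0 = 0
    REVLOGNG = 1
    REVLOGNGINLINEDATA = 1 << 16

    v = REVLOGNG | REVLOGNGINLINEDATA         # version word as written to disk
    assert v & 0xFFFF == REVLOGNG             # low 16 bits: index format
    assert v & ~0xFFFF == REVLOGNGINLINEDATA  # high bits: feature flags
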
23 def hash(text, p1, p2):
31 def hash(text, p1, p2):
24 """generate a hash from the given text and its parent hashes
32 """generate a hash from the given text and its parent hashes
25
33
26 This hash combines both the current file contents and its history
34 This hash combines both the current file contents and its history
27 in a manner that makes it easy to distinguish nodes with the same
35 in a manner that makes it easy to distinguish nodes with the same
28 content in the revision graph.
36 content in the revision graph.
29 """
37 """
30 l = [p1, p2]
38 l = [p1, p2]
31 l.sort()
39 l.sort()
32 s = sha.new(l[0])
40 s = sha.new(l[0])
33 s.update(l[1])
41 s.update(l[1])
34 s.update(text)
42 s.update(text)
35 return s.digest()
43 return s.digest()
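A toy illustration of the docstring's point, assuming nullid is the 20 zero bytes defined in node.py and using hashlib's SHA-1 in place of the sha module used here:

    import hashlib

    nullid = "\0" * 20
    def nodeid(text, p1=nullid, p2=nullid):
        l = [p1, p2]
        l.sort()
        return hashlib.sha1(l[0] + l[1] + text).digest()

    # identical text, different history -> different nodeids
    assert nodeid("data") != nodeid("data", p1=nodeid("ancestor"))
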
36
44
37 def compress(text):
45 def compress(text):
38 """ generate a possibly-compressed representation of text """
46 """ generate a possibly-compressed representation of text """
39 if not text: return ("", text)
47 if not text: return ("", text)
40 if len(text) < 44:
48 if len(text) < 44:
41 if text[0] == '\0': return ("", text)
49 if text[0] == '\0': return ("", text)
42 return ('u', text)
50 return ('u', text)
43 bin = zlib.compress(text)
51 bin = zlib.compress(text)
44 if len(bin) > len(text):
52 if len(bin) > len(text):
45 if text[0] == '\0': return ("", text)
53 if text[0] == '\0': return ("", text)
46 return ('u', text)
54 return ('u', text)
47 return ("", bin)
55 return ("", bin)
48
56
49 def decompress(bin):
57 def decompress(bin):
50 """ decompress the given input """
58 """ decompress the given input """
51 if not bin: return bin
59 if not bin: return bin
52 t = bin[0]
60 t = bin[0]
53 if t == '\0': return bin
61 if t == '\0': return bin
54 if t == 'x': return zlib.decompress(bin)
62 if t == 'x': return zlib.decompress(bin)
55 if t == 'u': return bin[1:]
63 if t == 'u': return bin[1:]
56 raise RevlogError(_("unknown compression type %r") % t)
64 raise RevlogError(_("unknown compression type %r") % t)
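A round-trip through the envelope, using the two helpers just defined (a sketch; the 'x' first byte is simply the leading byte of a default zlib stream):

    stored = "".join(compress("x" * 200))       # compress() returns a 2-tuple
    assert stored[0] == 'x'                     # zlib-compressed chunk
    assert decompress(stored) == "x" * 200
    assert decompress("u" + "tiny") == "tiny"   # marked stored-uncompressed
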
57
65
58 indexformatv0 = ">4l20s20s20s"
66 indexformatv0 = ">4l20s20s20s"
59 # index ng:
67 # index ng:
60 # 6 bytes offset
68 # 6 bytes offset
61 # 2 bytes flags
69 # 2 bytes flags
62 # 4 bytes compressed length
70 # 4 bytes compressed length
63 # 4 bytes uncompressed length
71 # 4 bytes uncompressed length
64 # 4 bytes: base rev
72 # 4 bytes: base rev
65 # 4 bytes link rev
73 # 4 bytes link rev
66 # 4 bytes parent 1 rev
74 # 4 bytes parent 1 rev
67 # 4 bytes parent 2 rev
75 # 4 bytes parent 2 rev
68 # 32 bytes: nodeid
76 # 32 bytes: nodeid
69 indexformatng = ">Qiiiiii20s12x"
77 indexformatng = ">Qiiiiii20s12x"
70 versionformat = ">i"
78 versionformat = ">i"
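For concreteness, here is one hypothetical v1 index entry packed with the format string above; every field value is invented for the example, and the record is a fixed 64 bytes:

    import struct

    assert struct.calcsize(indexformatng) == 64
    node = "\xaa" * 20                         # made-up 20-byte nodeid
    e = (0 << 16, 11, 24, 0, 0, -1, -1, node)  # offset/flags, lengths,
                                               # base, link, p1, p2, node
    record = struct.pack(indexformatng, *e)
    assert struct.unpack(indexformatng, record) == e
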
71
79
72 class lazyparser(object):
80 class lazyparser(object):
73 """
81 """
74 this class avoids the need to parse the entirety of large indices
82 this class avoids the need to parse the entirety of large indices
75
83
76 By default we parse and load 1000 entries at a time.
84 By default we parse and load 1000 entries at a time.
77
85
78 If no position is specified, we load the whole index, and replace
86 If no position is specified, we load the whole index, and replace
79 the lazy objects in revlog with the underlying objects for
87 the lazy objects in revlog with the underlying objects for
80 efficiency in cases where we look at most of the nodes.
88 efficiency in cases where we look at most of the nodes.
81 """
89 """
82 def __init__(self, data, revlog, indexformat):
90 def __init__(self, data, revlog, indexformat):
83 self.data = data
91 self.data = data
84 self.s = struct.calcsize(indexformat)
92 self.s = struct.calcsize(indexformat)
85 self.indexformat = indexformat
93 self.indexformat = indexformat
86 self.l = len(data)/self.s
94 self.l = len(data)/self.s
87 self.index = [None] * self.l
95 self.index = [None] * self.l
88 self.map = {nullid: -1}
96 self.map = {nullid: -1}
89 self.all = 0
97 self.all = 0
90 self.revlog = revlog
98 self.revlog = revlog
91
99
92 def load(self, pos=None):
100 def load(self, pos=None):
93 if self.all: return
101 if self.all: return
94 if pos is not None:
102 if pos is not None:
95 block = pos / 1000
103 block = pos / 1000
96 i = block * 1000
104 i = block * 1000
97 end = min(self.l, i + 1000)
105 end = min(self.l, i + 1000)
98 else:
106 else:
99 self.all = 1
107 self.all = 1
100 i = 0
108 i = 0
101 end = self.l
109 end = self.l
102 self.revlog.index = self.index
110 self.revlog.index = self.index
103 self.revlog.nodemap = self.map
111 self.revlog.nodemap = self.map
104
112
105 while i < end:
113 while i < end:
106 if not self.index[i]:
114 if not self.index[i]:
107 d = self.data[i * self.s: (i + 1) * self.s]
115 d = self.data[i * self.s: (i + 1) * self.s]
108 e = struct.unpack(self.indexformat, d)
116 e = struct.unpack(self.indexformat, d)
109 self.index[i] = e
117 self.index[i] = e
110 self.map[e[-1]] = i
118 self.map[e[-1]] = i
111 i += 1
119 i += 1
112
120
113 class lazyindex(object):
121 class lazyindex(object):
114 """a lazy version of the index array"""
122 """a lazy version of the index array"""
115 def __init__(self, parser):
123 def __init__(self, parser):
116 self.p = parser
124 self.p = parser
117 def __len__(self):
125 def __len__(self):
118 return len(self.p.index)
126 return len(self.p.index)
119 def load(self, pos):
127 def load(self, pos):
120 if pos < 0:
128 if pos < 0:
121 pos += len(self.p.index)
129 pos += len(self.p.index)
122 self.p.load(pos)
130 self.p.load(pos)
123 return self.p.index[pos]
131 return self.p.index[pos]
124 def __getitem__(self, pos):
132 def __getitem__(self, pos):
125 return self.p.index[pos] or self.load(pos)
133 return self.p.index[pos] or self.load(pos)
126 def __setitem__(self, pos, item):
134 def __setitem__(self, pos, item):
127 self.p.index[pos] = item
135 self.p.index[pos] = item
128 def __delitem__(self, pos):
136 def __delitem__(self, pos):
129 del self.p.index[pos]
137 del self.p.index[pos]
130 def append(self, e):
138 def append(self, e):
131 self.p.index.append(e)
139 self.p.index.append(e)
132
140
133 class lazymap(object):
141 class lazymap(object):
134 """a lazy version of the node map"""
142 """a lazy version of the node map"""
135 def __init__(self, parser):
143 def __init__(self, parser):
136 self.p = parser
144 self.p = parser
137 def load(self, key):
145 def load(self, key):
138 if self.p.all: return
146 if self.p.all: return
139 n = self.p.data.find(key)
147 n = self.p.data.find(key)
140 if n < 0:
148 if n < 0:
141 raise KeyError(key)
149 raise KeyError(key)
142 pos = n / self.p.s
150 pos = n / self.p.s
143 self.p.load(pos)
151 self.p.load(pos)
144 def __contains__(self, key):
152 def __contains__(self, key):
145 self.p.load()
153 self.p.load()
146 return key in self.p.map
154 return key in self.p.map
147 def __iter__(self):
155 def __iter__(self):
148 yield nullid
156 yield nullid
149 for i in xrange(self.p.l):
157 for i in xrange(self.p.l):
150 try:
158 try:
151 yield self.p.index[i][-1]
159 yield self.p.index[i][-1]
152 except:
160 except:
153 self.p.load(i)
161 self.p.load(i)
154 yield self.p.index[i][-1]
162 yield self.p.index[i][-1]
155 def __getitem__(self, key):
163 def __getitem__(self, key):
156 try:
164 try:
157 return self.p.map[key]
165 return self.p.map[key]
158 except KeyError:
166 except KeyError:
159 try:
167 try:
160 self.load(key)
168 self.load(key)
161 return self.p.map[key]
169 return self.p.map[key]
162 except KeyError:
170 except KeyError:
163 raise KeyError("node " + hex(key))
171 raise KeyError("node " + hex(key))
164 def __setitem__(self, key, val):
172 def __setitem__(self, key, val):
165 self.p.map[key] = val
173 self.p.map[key] = val
166 def __delitem__(self, key):
174 def __delitem__(self, key):
167 del self.p.map[key]
175 del self.p.map[key]
168
176
169 class RevlogError(Exception): pass
177 class RevlogError(Exception): pass
170
178
171 class revlog(object):
179 class revlog(object):
172 """
180 """
173 the underlying revision storage object
181 the underlying revision storage object
174
182
175 A revlog consists of two parts, an index and the revision data.
183 A revlog consists of two parts, an index and the revision data.
176
184
177 The index is a file with a fixed record size containing
185 The index is a file with a fixed record size containing
178 information on each revision, including its nodeid (hash), the
186 information on each revision, including its nodeid (hash), the
179 nodeids of its parents, the position and offset of its data within
187 nodeids of its parents, the position and offset of its data within
180 the data file, and the revision it's based on. Finally, each entry
188 the data file, and the revision it's based on. Finally, each entry
181 contains a linkrev entry that can serve as a pointer to external
189 contains a linkrev entry that can serve as a pointer to external
182 data.
190 data.
183
191
184 The revision data itself is a linear collection of data chunks.
192 The revision data itself is a linear collection of data chunks.
185 Each chunk represents a revision and is usually represented as a
193 Each chunk represents a revision and is usually represented as a
186 delta against the previous chunk. To bound lookup time, runs of
194 delta against the previous chunk. To bound lookup time, runs of
187 deltas are limited to about 2 times the length of the original
195 deltas are limited to about 2 times the length of the original
188 version data. This makes retrieval of a version proportional to
196 version data. This makes retrieval of a version proportional to
189 its size, or O(1) relative to the number of revisions.
197 its size, or O(1) relative to the number of revisions.
190
198
191 Both pieces of the revlog are written to in an append-only
199 Both pieces of the revlog are written to in an append-only
192 fashion, which means we never need to rewrite a file to insert or
200 fashion, which means we never need to rewrite a file to insert or
193 remove data, and can use some simple techniques to avoid the need
201 remove data, and can use some simple techniques to avoid the need
194 for locking while reading.
202 for locking while reading.
195 """
203 """
196 def __init__(self, opener, indexfile, datafile, defversion=0):
204 def __init__(self, opener, indexfile, datafile, defversion=0):
197 """
205 """
198 create a revlog object
206 create a revlog object
199
207
200 opener is a function that abstracts the file opening operation
208 opener is a function that abstracts the file opening operation
201 and can be used to implement COW semantics or the like.
209 and can be used to implement COW semantics or the like.
202 """
210 """
203 self.indexfile = indexfile
211 self.indexfile = indexfile
204 self.datafile = datafile
212 self.datafile = datafile
205 self.opener = opener
213 self.opener = opener
206
214
207 self.indexstat = None
215 self.indexstat = None
208 self.cache = None
216 self.cache = None
209 self.chunkcache = None
217 self.chunkcache = None
210 self.defversion = defversion
218 self.defversion = defversion
211 self.load()
219 self.load()
212
220
213 def load(self):
221 def load(self):
214 v = self.defversion
222 v = self.defversion
215 try:
223 try:
216 f = self.opener(self.indexfile)
224 f = self.opener(self.indexfile)
217 i = f.read()
225 i = f.read()
218 except IOError, inst:
226 except IOError, inst:
219 if inst.errno != errno.ENOENT:
227 if inst.errno != errno.ENOENT:
220 raise
228 raise
221 i = ""
229 i = ""
222 else:
230 else:
223 try:
231 try:
224 st = os.fstat(f.fileno())
232 st = os.fstat(f.fileno())
225 except AttributeError, inst:
233 except AttributeError, inst:
226 st = None
234 st = None
227 else:
235 else:
228 oldst = self.indexstat
236 oldst = self.indexstat
229 if (oldst and st.st_dev == oldst.st_dev
237 if (oldst and st.st_dev == oldst.st_dev
230 and st.st_ino == oldst.st_ino
238 and st.st_ino == oldst.st_ino
231 and st.st_mtime == oldst.st_mtime
239 and st.st_mtime == oldst.st_mtime
232 and st.st_ctime == oldst.st_ctime):
240 and st.st_ctime == oldst.st_ctime):
233 return
241 return
234 self.indexstat = st
242 self.indexstat = st
235 if len(i) > 0:
243 if len(i) > 0:
236 v = struct.unpack(versionformat, i[:4])[0]
244 v = struct.unpack(versionformat, i[:4])[0]
237 if v != 0:
245 flags = v & ~0xFFFF
238 flags = v & ~0xFFFF
246 fmt = v & 0xFFFF
239 fmt = v & 0xFFFF
247 if fmt == 0:
240 if fmt != REVLOGNG or (flags & ~(REVLOGNGINLINEDATA)):
248 if flags:
241 raise RevlogError(
249 raise RevlogError(_("index %s invalid flags %x for format v0" %
242 _("unknown version format %d or flags %x on %s") %
250 (self.indexfile, flags))
243 (v, flags, self.indexfile))
251 elif fmt == REVLOGNG:
252 if flags & ~REVLOGNGINLINEDATA:
253 raise RevlogError(_("index %s invalid flags %x for revlogng") %
254 (self.indexfile, flags))
255 else:
256 raise RevlogError(_("index %s invalid format %d") %
257 (self.indexfile, fmt))
244 self.version = v
258 self.version = v
245 if v == 0:
259 if v == 0:
246 self.indexformat = indexformatv0
260 self.indexformat = indexformatv0
247 else:
261 else:
248 self.indexformat = indexformatng
262 self.indexformat = indexformatng
249
263
250 if i:
264 if i:
251 if st and st.st_size > 10000:
265 if not self.inlinedata() and st and st.st_size > 10000:
252 # big index, let's parse it on demand
266 # big index, let's parse it on demand
253 parser = lazyparser(i, self, self.indexformat)
267 parser = lazyparser(i, self, self.indexformat)
254 self.index = lazyindex(parser)
268 self.index = lazyindex(parser)
255 self.nodemap = lazymap(parser)
269 self.nodemap = lazymap(parser)
256 else:
270 else:
257 self.parseindex(i)
271 self.parseindex(i)
272 if self.inlinedata():
273 # we've already got the entire data file read in, save it
274 # in the chunk data
275 self.chunkcache = (0, i)
258 if self.version != 0:
276 if self.version != 0:
259 e = list(self.index[0])
277 e = list(self.index[0])
260 type = self.ngtype(e[0])
278 type = self.ngtype(e[0])
261 e[0] = self.offset_type(0, type)
279 e[0] = self.offset_type(0, type)
262 self.index[0] = e
280 self.index[0] = e
263 else:
281 else:
264 self.nodemap = { nullid: -1}
282 self.nodemap = { nullid: -1}
265 self.index = []
283 self.index = []
266
284
267
285
268 def parseindex(self, data):
286 def parseindex(self, data):
269 s = struct.calcsize(self.indexformat)
287 s = struct.calcsize(self.indexformat)
270 l = len(data)
288 l = len(data)
271 self.index = []
289 self.index = []
272 self.nodemap = {nullid: -1}
290 self.nodemap = {nullid: -1}
291 inline = self.inlinedata()
273 off = 0
292 off = 0
274 n = 0
293 n = 0
275 while off < l:
294 while off < l:
276 e = struct.unpack(self.indexformat, data[off:off + s])
295 e = struct.unpack(self.indexformat, data[off:off + s])
277 self.index.append(e)
296 self.index.append(e)
278 self.nodemap[e[-1]] = n
297 self.nodemap[e[-1]] = n
279 n += 1
298 n += 1
280 off += s
299 off += s
300 if inline:
301 off += e[1]
281
302
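The inline layout this loop decodes, as a standalone sketch: each fixed-size record is immediately followed by its compressed revision data, whose length is the second index field (e[1]):

    import struct

    fmt = ">Qiiiiii20s12x"
    s = struct.calcsize(fmt)

    def walk_inline(data):
        off, n = 0, 0
        while off < len(data):
            e = struct.unpack(fmt, data[off:off + s])
            yield n, e
            off += s + e[1]   # skip past the inlined chunk
            n += 1
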
282 def ngoffset(self, q):
303 def ngoffset(self, q):
283 if q & 0xFFFF:
304 if q & 0xFFFF:
284 raise RevlogError(_('%s: incompatible revision flag %x') %
305 raise RevlogError(_('%s: incompatible revision flag %x') %
285 (self.indexfile, type))
306 (self.indexfile, type))
286 return long(q >> 16)
307 return long(q >> 16)
287
308
288 def ngtype(self, q):
309 def ngtype(self, q):
289 return int(q & 0xFFFF)
310 return int(q & 0xFFFF)
290
311
291 def offset_type(self, offset, type):
312 def offset_type(self, offset, type):
292 return long(long(offset) << 16 | type)
313 return long(long(offset) << 16 | type)
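These three helpers multiplex the data-file offset (high 48 bits) and a per-revision type field (low 16 bits) into the first index column; numerically:

    q = (4096 << 16) | 0      # offset_type(4096, 0)
    assert q >> 16 == 4096    # ngoffset()
    assert q & 0xFFFF == 0    # ngtype()
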
293
314
294 def loadindexmap(self):
315 def loadindexmap(self):
295 """loads both the map and the index from the lazy parser"""
316 """loads both the map and the index from the lazy parser"""
296 if isinstance(self.index, lazyindex):
317 if isinstance(self.index, lazyindex):
297 p = self.index.p
318 p = self.index.p
298 p.load()
319 p.load()
299
320
321 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
300 def tip(self): return self.node(len(self.index) - 1)
322 def tip(self): return self.node(len(self.index) - 1)
301 def count(self): return len(self.index)
323 def count(self): return len(self.index)
302 def node(self, rev):
324 def node(self, rev):
303 return (rev < 0) and nullid or self.index[rev][-1]
325 return (rev < 0) and nullid or self.index[rev][-1]
304 def rev(self, node):
326 def rev(self, node):
305 try:
327 try:
306 return self.nodemap[node]
328 return self.nodemap[node]
307 except KeyError:
329 except KeyError:
308 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
330 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
309 def linkrev(self, node): return self.index[self.rev(node)][-4]
331 def linkrev(self, node): return self.index[self.rev(node)][-4]
310 def parents(self, node):
332 def parents(self, node):
311 if node == nullid: return (nullid, nullid)
333 if node == nullid: return (nullid, nullid)
312 r = self.rev(node)
334 r = self.rev(node)
313 d = self.index[r][-3:-1]
335 d = self.index[r][-3:-1]
314 if self.version == 0:
336 if self.version == 0:
315 return d
337 return d
316 return [ self.node(x) for x in d ]
338 return [ self.node(x) for x in d ]
317 def start(self, rev):
339 def start(self, rev):
318 if rev < 0:
340 if rev < 0:
319 return -1
341 return -1
320 if self.version != 0:
342 if self.version != 0:
321 return self.ngoffset(self.index[rev][0])
343 return self.ngoffset(self.index[rev][0])
322 return self.index[rev][0]
344 return self.index[rev][0]
323 def end(self, rev): return self.start(rev) + self.length(rev)
345 def end(self, rev): return self.start(rev) + self.length(rev)
324
346
325 def length(self, rev):
347 def length(self, rev):
326 if rev < 0:
348 if rev < 0:
327 return 0
349 return 0
328 else:
350 else:
329 return self.index[rev][1]
351 return self.index[rev][1]
330 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
352 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
331
353
332 def reachable(self, rev, stop=None):
354 def reachable(self, rev, stop=None):
333 reachable = {}
355 reachable = {}
334 visit = [rev]
356 visit = [rev]
335 reachable[rev] = 1
357 reachable[rev] = 1
336 if stop:
358 if stop:
337 stopn = self.rev(stop)
359 stopn = self.rev(stop)
338 else:
360 else:
339 stopn = 0
361 stopn = 0
340 while visit:
362 while visit:
341 n = visit.pop(0)
363 n = visit.pop(0)
342 if n == stop:
364 if n == stop:
343 continue
365 continue
344 if n == nullid:
366 if n == nullid:
345 continue
367 continue
346 for p in self.parents(n):
368 for p in self.parents(n):
347 if self.rev(p) < stopn:
369 if self.rev(p) < stopn:
348 continue
370 continue
349 if p not in reachable:
371 if p not in reachable:
350 reachable[p] = 1
372 reachable[p] = 1
351 visit.append(p)
373 visit.append(p)
352 return reachable
374 return reachable
353
375
354 def nodesbetween(self, roots=None, heads=None):
376 def nodesbetween(self, roots=None, heads=None):
355 """Return a tuple containing three elements. Elements 1 and 2 contain
377 """Return a tuple containing three elements. Elements 1 and 2 contain
356 a final list of bases and heads after all the unreachable ones have been
378 a final list of bases and heads after all the unreachable ones have been
357 pruned. Element 0 contains a topologically sorted list of all
379 pruned. Element 0 contains a topologically sorted list of all
359 nodes that satisfy these constraints:
381 nodes that satisfy these constraints:
360 1. All nodes must be descended from a node in roots (the nodes on
382 1. All nodes must be descended from a node in roots (the nodes on
361 roots are considered descended from themselves).
383 roots are considered descended from themselves).
362 2. All nodes must also be ancestors of a node in heads (the nodes in
384 2. All nodes must also be ancestors of a node in heads (the nodes in
363 heads are considered to be their own ancestors).
385 heads are considered to be their own ancestors).
364
386
365 If roots is unspecified, nullid is assumed as the only root.
387 If roots is unspecified, nullid is assumed as the only root.
366 If heads is unspecified, it is taken to be the output of the
388 If heads is unspecified, it is taken to be the output of the
367 heads method (i.e. a list of all nodes in the repository that
389 heads method (i.e. a list of all nodes in the repository that
368 have no children)."""
390 have no children)."""
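A toy model of the two constraints on a plain parents-map DAG (a sketch only; the real implementation below works on revision numbers and prunes roots and heads as it goes):

    parents = {"a": [], "b": ["a"], "c": ["b"], "d": ["a"]}

    def ancestors(n):
        seen, stack = {n: 1}, [n]
        while stack:
            for p in parents[stack.pop()]:
                if p not in seen:
                    seen[p] = 1
                    stack.append(p)
        return seen

    def between(roots, heads):
        anc = {}
        for h in heads:
            anc.update(ancestors(h))
        return [n for n in sorted(parents)
                if n in anc and [r for r in roots if r in ancestors(n)]]

    assert between(["a"], ["c"]) == ["a", "b", "c"]   # "d" fails rule 2
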
369 nonodes = ([], [], [])
391 nonodes = ([], [], [])
370 if roots is not None:
392 if roots is not None:
371 roots = list(roots)
393 roots = list(roots)
372 if not roots:
394 if not roots:
373 return nonodes
395 return nonodes
374 lowestrev = min([self.rev(n) for n in roots])
396 lowestrev = min([self.rev(n) for n in roots])
375 else:
397 else:
376 roots = [nullid] # Everybody's a descendent of nullid
398 roots = [nullid] # Everybody's a descendent of nullid
377 lowestrev = -1
399 lowestrev = -1
378 if (lowestrev == -1) and (heads is None):
400 if (lowestrev == -1) and (heads is None):
379 # We want _all_ the nodes!
401 # We want _all_ the nodes!
380 return ([self.node(r) for r in xrange(0, self.count())],
402 return ([self.node(r) for r in xrange(0, self.count())],
381 [nullid], list(self.heads()))
403 [nullid], list(self.heads()))
382 if heads is None:
404 if heads is None:
383 # All nodes are ancestors, so the latest ancestor is the last
405 # All nodes are ancestors, so the latest ancestor is the last
384 # node.
406 # node.
385 highestrev = self.count() - 1
407 highestrev = self.count() - 1
386 # Set ancestors to None to signal that every node is an ancestor.
408 # Set ancestors to None to signal that every node is an ancestor.
387 ancestors = None
409 ancestors = None
388 # Set heads to an empty dictionary for later discovery of heads
410 # Set heads to an empty dictionary for later discovery of heads
389 heads = {}
411 heads = {}
390 else:
412 else:
391 heads = list(heads)
413 heads = list(heads)
392 if not heads:
414 if not heads:
393 return nonodes
415 return nonodes
394 ancestors = {}
416 ancestors = {}
395 # Start at the top and keep marking parents until we're done.
417 # Start at the top and keep marking parents until we're done.
396 nodestotag = heads[:]
418 nodestotag = heads[:]
397 # Turn heads into a dictionary so we can remove 'fake' heads.
419 # Turn heads into a dictionary so we can remove 'fake' heads.
398 # Also, later we will be using it to filter out the heads we can't
420 # Also, later we will be using it to filter out the heads we can't
399 # find from roots.
421 # find from roots.
400 heads = dict.fromkeys(heads, 0)
422 heads = dict.fromkeys(heads, 0)
401 # Remember where the top was so we can use it as a limit later.
423 # Remember where the top was so we can use it as a limit later.
402 highestrev = max([self.rev(n) for n in nodestotag])
424 highestrev = max([self.rev(n) for n in nodestotag])
403 while nodestotag:
425 while nodestotag:
404 # grab a node to tag
426 # grab a node to tag
405 n = nodestotag.pop()
427 n = nodestotag.pop()
406 # Never tag nullid
428 # Never tag nullid
407 if n == nullid:
429 if n == nullid:
408 continue
430 continue
409 # A node's revision number represents its place in a
431 # A node's revision number represents its place in a
410 # topologically sorted list of nodes.
432 # topologically sorted list of nodes.
411 r = self.rev(n)
433 r = self.rev(n)
412 if r >= lowestrev:
434 if r >= lowestrev:
413 if n not in ancestors:
435 if n not in ancestors:
414 # If we are possibly a descendent of one of the roots
436 # If we are possibly a descendent of one of the roots
415 # and we haven't already been marked as an ancestor
437 # and we haven't already been marked as an ancestor
416 ancestors[n] = 1 # Mark as ancestor
438 ancestors[n] = 1 # Mark as ancestor
417 # Add non-nullid parents to list of nodes to tag.
439 # Add non-nullid parents to list of nodes to tag.
418 nodestotag.extend([p for p in self.parents(n) if
440 nodestotag.extend([p for p in self.parents(n) if
419 p != nullid])
441 p != nullid])
420 elif n in heads: # We've seen it before, is it a fake head?
442 elif n in heads: # We've seen it before, is it a fake head?
421 # So it is, real heads should not be the ancestors of
443 # So it is, real heads should not be the ancestors of
422 # any other heads.
444 # any other heads.
423 heads.pop(n)
445 heads.pop(n)
424 if not ancestors:
446 if not ancestors:
425 return nonodes
447 return nonodes
426 # Now that we have our set of ancestors, we want to remove any
448 # Now that we have our set of ancestors, we want to remove any
427 # roots that are not ancestors.
449 # roots that are not ancestors.
428
450
429 # If one of the roots was nullid, everything is included anyway.
451 # If one of the roots was nullid, everything is included anyway.
430 if lowestrev > -1:
452 if lowestrev > -1:
431 # But, since we weren't, let's recompute the lowest rev to not
453 # But, since we weren't, let's recompute the lowest rev to not
432 # include roots that aren't ancestors.
454 # include roots that aren't ancestors.
433
455
434 # Filter out roots that aren't ancestors of heads
456 # Filter out roots that aren't ancestors of heads
435 roots = [n for n in roots if n in ancestors]
457 roots = [n for n in roots if n in ancestors]
436 # Recompute the lowest revision
458 # Recompute the lowest revision
437 if roots:
459 if roots:
438 lowestrev = min([self.rev(n) for n in roots])
460 lowestrev = min([self.rev(n) for n in roots])
439 else:
461 else:
440 # No more roots? Return empty list
462 # No more roots? Return empty list
441 return nonodes
463 return nonodes
442 else:
464 else:
443 # We are descending from nullid, and don't need to care about
465 # We are descending from nullid, and don't need to care about
444 # any other roots.
466 # any other roots.
445 lowestrev = -1
467 lowestrev = -1
446 roots = [nullid]
468 roots = [nullid]
447 # Transform our roots list into a 'set' (i.e. a dictionary where the
469 # Transform our roots list into a 'set' (i.e. a dictionary where the
448 # values don't matter.)
470 # values don't matter.)
449 descendents = dict.fromkeys(roots, 1)
471 descendents = dict.fromkeys(roots, 1)
450 # Also, keep the original roots so we can filter out roots that aren't
472 # Also, keep the original roots so we can filter out roots that aren't
451 # 'real' roots (i.e. are descended from other roots).
473 # 'real' roots (i.e. are descended from other roots).
452 roots = descendents.copy()
474 roots = descendents.copy()
453 # Our topologically sorted list of output nodes.
475 # Our topologically sorted list of output nodes.
454 orderedout = []
476 orderedout = []
455 # Don't start at nullid since we don't want nullid in our output list,
477 # Don't start at nullid since we don't want nullid in our output list,
456 # and if nullid shows up in descendents, empty parents will look like
478 # and if nullid shows up in descendents, empty parents will look like
457 # they're descendents.
479 # they're descendents.
458 for r in xrange(max(lowestrev, 0), highestrev + 1):
480 for r in xrange(max(lowestrev, 0), highestrev + 1):
459 n = self.node(r)
481 n = self.node(r)
460 isdescendent = False
482 isdescendent = False
461 if lowestrev == -1: # Everybody is a descendent of nullid
483 if lowestrev == -1: # Everybody is a descendent of nullid
462 isdescendent = True
484 isdescendent = True
463 elif n in descendents:
485 elif n in descendents:
464 # n is already a descendent
486 # n is already a descendent
465 isdescendent = True
487 isdescendent = True
466 # This check only needs to be done here because all the roots
488 # This check only needs to be done here because all the roots
467 # will start being marked as descendents before the loop.
489 # will start being marked as descendents before the loop.
468 if n in roots:
490 if n in roots:
469 # If n was a root, check if it's a 'real' root.
491 # If n was a root, check if it's a 'real' root.
470 p = tuple(self.parents(n))
492 p = tuple(self.parents(n))
471 # If any of its parents are descendents, it's not a root.
493 # If any of its parents are descendents, it's not a root.
472 if (p[0] in descendents) or (p[1] in descendents):
494 if (p[0] in descendents) or (p[1] in descendents):
473 roots.pop(n)
495 roots.pop(n)
474 else:
496 else:
475 p = tuple(self.parents(n))
497 p = tuple(self.parents(n))
476 # A node is a descendent if either of its parents are
498 # A node is a descendent if either of its parents are
477 # descendents. (We seeded the descendents list with the roots
499 # descendents. (We seeded the descendents list with the roots
478 # up there, remember?)
500 # up there, remember?)
479 if (p[0] in descendents) or (p[1] in descendents):
501 if (p[0] in descendents) or (p[1] in descendents):
480 descendents[n] = 1
502 descendents[n] = 1
481 isdescendent = True
503 isdescendent = True
482 if isdescendent and ((ancestors is None) or (n in ancestors)):
504 if isdescendent and ((ancestors is None) or (n in ancestors)):
483 # Only include nodes that are both descendents and ancestors.
505 # Only include nodes that are both descendents and ancestors.
484 orderedout.append(n)
506 orderedout.append(n)
485 if (ancestors is not None) and (n in heads):
507 if (ancestors is not None) and (n in heads):
486 # We're trying to figure out which heads are reachable
508 # We're trying to figure out which heads are reachable
487 # from roots.
509 # from roots.
488 # Mark this head as having been reached
510 # Mark this head as having been reached
489 heads[n] = 1
511 heads[n] = 1
490 elif ancestors is None:
512 elif ancestors is None:
491 # Otherwise, we're trying to discover the heads.
513 # Otherwise, we're trying to discover the heads.
492 # Assume this is a head because if it isn't, the next step
514 # Assume this is a head because if it isn't, the next step
493 # will eventually remove it.
515 # will eventually remove it.
494 heads[n] = 1
516 heads[n] = 1
495 # But, obviously its parents aren't.
517 # But, obviously its parents aren't.
496 for p in self.parents(n):
518 for p in self.parents(n):
497 heads.pop(p, None)
519 heads.pop(p, None)
498 heads = [n for n in heads.iterkeys() if heads[n] != 0]
520 heads = [n for n in heads.iterkeys() if heads[n] != 0]
499 roots = roots.keys()
521 roots = roots.keys()
500 assert orderedout
522 assert orderedout
501 assert roots
523 assert roots
502 assert heads
524 assert heads
503 return (orderedout, roots, heads)
525 return (orderedout, roots, heads)
504
526
505 def heads(self, start=None):
527 def heads(self, start=None):
506 """return the list of all nodes that have no children
528 """return the list of all nodes that have no children
507
529
508 if start is specified, only heads that are descendants of
530 if start is specified, only heads that are descendants of
509 start will be returned
531 start will be returned
510
532
511 """
533 """
512 if start is None:
534 if start is None:
513 start = nullid
535 start = nullid
514 reachable = {start: 1}
536 reachable = {start: 1}
515 heads = {start: 1}
537 heads = {start: 1}
516 startrev = self.rev(start)
538 startrev = self.rev(start)
517
539
518 for r in xrange(startrev + 1, self.count()):
540 for r in xrange(startrev + 1, self.count()):
519 n = self.node(r)
541 n = self.node(r)
520 for pn in self.parents(n):
542 for pn in self.parents(n):
521 if pn in reachable:
543 if pn in reachable:
522 reachable[n] = 1
544 reachable[n] = 1
523 heads[n] = 1
545 heads[n] = 1
524 if pn in heads:
546 if pn in heads:
525 del heads[pn]
547 del heads[pn]
526 return heads.keys()
548 return heads.keys()
527
549
528 def children(self, node):
550 def children(self, node):
529 """find the children of a given node"""
551 """find the children of a given node"""
530 c = []
552 c = []
531 p = self.rev(node)
553 p = self.rev(node)
532 for r in range(p + 1, self.count()):
554 for r in range(p + 1, self.count()):
533 n = self.node(r)
555 n = self.node(r)
534 for pn in self.parents(n):
556 for pn in self.parents(n):
535 if pn == node:
557 if pn == node:
536 c.append(n)
558 c.append(n)
537 continue
559 continue
538 elif pn == nullid:
560 elif pn == nullid:
539 continue
561 continue
540 return c
562 return c
541
563
542 def lookup(self, id):
564 def lookup(self, id):
543 """locate a node based on revision number or subset of hex nodeid"""
565 """locate a node based on revision number or subset of hex nodeid"""
544 try:
566 try:
545 rev = int(id)
567 rev = int(id)
546 if str(rev) != id: raise ValueError
568 if str(rev) != id: raise ValueError
547 if rev < 0: rev = self.count() + rev
569 if rev < 0: rev = self.count() + rev
548 if rev < 0 or rev >= self.count(): raise ValueError
570 if rev < 0 or rev >= self.count(): raise ValueError
549 return self.node(rev)
571 return self.node(rev)
550 except (ValueError, OverflowError):
572 except (ValueError, OverflowError):
551 c = []
573 c = []
552 for n in self.nodemap:
574 for n in self.nodemap:
553 if hex(n).startswith(id):
575 if hex(n).startswith(id):
554 c.append(n)
576 c.append(n)
555 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
577 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
556 if len(c) < 1: raise RevlogError(_("No match found"))
578 if len(c) < 1: raise RevlogError(_("No match found"))
557 return c[0]
579 return c[0]
558
580
559 return None
581 return None
560
582
561 def diff(self, a, b):
583 def diff(self, a, b):
562 """return a delta between two revisions"""
584 """return a delta between two revisions"""
563 return mdiff.textdiff(a, b)
585 return mdiff.textdiff(a, b)
564
586
565 def patches(self, t, pl):
587 def patches(self, t, pl):
566 """apply a list of patches to a string"""
588 """apply a list of patches to a string"""
567 return mdiff.patches(t, pl)
589 return mdiff.patches(t, pl)
568
590
569 def chunk(self, rev, df=None, cachelen=4096):
591 def chunk(self, rev, df=None, cachelen=4096):
570 start, length = self.start(rev), self.length(rev)
592 start, length = self.start(rev), self.length(rev)
593 inline = self.inlinedata()
594 if inline:
595 start += (rev + 1) * struct.calcsize(self.indexformat)
571 end = start + length
596 end = start + length
572 def loadcache(df):
597 def loadcache(df):
573 cache_length = max(cachelen, length) # 4k
598 cache_length = max(cachelen, length) # 4k
574 if not df:
599 if not df:
575 df = self.opener(self.datafile)
600 if inline:
601 df = self.opener(self.indexfile)
602 else:
603 df = self.opener(self.datafile)
576 df.seek(start)
604 df.seek(start)
577 self.chunkcache = (start, df.read(cache_length))
605 self.chunkcache = (start, df.read(cache_length))
578
606
579 if not self.chunkcache:
607 if not self.chunkcache:
580 loadcache(df)
608 loadcache(df)
581
609
582 cache_start = self.chunkcache[0]
610 cache_start = self.chunkcache[0]
583 cache_end = cache_start + len(self.chunkcache[1])
611 cache_end = cache_start + len(self.chunkcache[1])
584 if start >= cache_start and end <= cache_end:
612 if start >= cache_start and end <= cache_end:
585 # it is cached
613 # it is cached
586 offset = start - cache_start
614 offset = start - cache_start
587 else:
615 else:
588 loadcache(df)
616 loadcache(df)
589 offset = 0
617 offset = 0
590
618
591 #def checkchunk():
619 #def checkchunk():
592 # df = self.opener(self.datafile)
620 # df = self.opener(self.datafile)
593 # df.seek(start)
621 # df.seek(start)
594 # return df.read(length)
622 # return df.read(length)
595 #assert s == checkchunk()
623 #assert s == checkchunk()
596 return decompress(self.chunkcache[1][offset:offset + length])
624 return decompress(self.chunkcache[1][offset:offset + length])
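The offset adjustment at the top of chunk() in one line of arithmetic: with inline data, revision rev's chunk lives in the .i file itself, after the rev + 1 index records that precede it (a sketch with made-up numbers; 64 is the v1 record size):

    import struct

    recsize = struct.calcsize(">Qiiiiii20s12x")   # 64
    start, rev = 150, 2       # hypothetical logical offset and revision
    physical = start + (rev + 1) * recsize
    assert physical == 150 + 3 * 64
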
597
625
598 def delta(self, node):
626 def delta(self, node):
599 """return or calculate a delta between a node and its predecessor"""
627 """return or calculate a delta between a node and its predecessor"""
600 r = self.rev(node)
628 r = self.rev(node)
601 return self.revdiff(r - 1, r)
629 return self.revdiff(r - 1, r)
602
630
603 def revdiff(self, rev1, rev2):
631 def revdiff(self, rev1, rev2):
604 """return or calculate a delta between two revisions"""
632 """return or calculate a delta between two revisions"""
605 b1 = self.base(rev1)
633 b1 = self.base(rev1)
606 b2 = self.base(rev2)
634 b2 = self.base(rev2)
607 if b1 == b2 and rev1 + 1 == rev2:
635 if b1 == b2 and rev1 + 1 == rev2:
608 return self.chunk(rev2)
636 return self.chunk(rev2)
609 else:
637 else:
610 return self.diff(self.revision(self.node(rev1)),
638 return self.diff(self.revision(self.node(rev1)),
611 self.revision(self.node(rev2)))
639 self.revision(self.node(rev2)))
612
640
613 def revision(self, node):
641 def revision(self, node):
614 """return an uncompressed revision of a given"""
642 """return an uncompressed revision of a given"""
615 if node == nullid: return ""
643 if node == nullid: return ""
616 if self.cache and self.cache[0] == node: return self.cache[2]
644 if self.cache and self.cache[0] == node: return self.cache[2]
617
645
618 # look up what we need to read
646 # look up what we need to read
619 text = None
647 text = None
620 rev = self.rev(node)
648 rev = self.rev(node)
621 base = self.base(rev)
649 base = self.base(rev)
622
650
623 df = self.opener(self.datafile)
651 if self.inlinedata():
652 # we probably have the whole chunk cached
653 df = None
654 else:
655 df = self.opener(self.datafile)
624
656
625 # do we have useful data cached?
657 # do we have useful data cached?
626 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
658 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
627 base = self.cache[1]
659 base = self.cache[1]
628 text = self.cache[2]
660 text = self.cache[2]
629 else:
661 else:
630 text = self.chunk(base, df=df)
662 text = self.chunk(base, df=df)
631
663
632 bins = []
664 bins = []
633 for r in xrange(base + 1, rev + 1):
665 for r in xrange(base + 1, rev + 1):
634 bins.append(self.chunk(r, df=df))
666 bins.append(self.chunk(r, df=df))
635
667
636 text = self.patches(text, bins)
668 text = self.patches(text, bins)
637
669
638 p1, p2 = self.parents(node)
670 p1, p2 = self.parents(node)
639 if node != hash(text, p1, p2):
671 if node != hash(text, p1, p2):
640 raise RevlogError(_("integrity check failed on %s:%d")
672 raise RevlogError(_("integrity check failed on %s:%d")
641 % (self.datafile, rev))
673 % (self.datafile, rev))
642
674
643 self.cache = (node, rev, text)
675 self.cache = (node, rev, text)
644 return text
676 return text
645
677
678 def checkinlinesize(self, fp, tr):
679 if not self.inlinedata():
680 return
681 size = fp.tell()
682 if size < 131072:
683 return
684 tr.add(self.datafile, 0)
685 df = self.opener(self.datafile, 'w')
686 calc = struct.calcsize(self.indexformat)
687 for r in xrange(self.count()):
688 start = self.start(r) + (r + 1) * calc
689 length = self.length(r)
690 fp.seek(start)
691 d = fp.read(length)
692 df.write(d)
693 fp.close()
694 df.close()
695 fp = self.opener(self.indexfile, 'w', atomic=True)
696 self.version &= ~(REVLOGNGINLINEDATA)
697 if self.count():
698 x = self.index[0]
699 e = struct.pack(self.indexformat, *x)[4:]
700 l = struct.pack(versionformat, self.version)
701 fp.write(l)
702 fp.write(e)
703
704 for i in xrange(1, self.count()):
705 x = self.index[i]
706 e = struct.pack(self.indexformat, *x)
707 fp.write(e)
708
709 fp.close()
710 self.chunkcache = None
711
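So once the interleaved file passes 128 KiB (131072 bytes), the revlog is rewritten as a conventional index/data pair. A sketch of the copy performed above, assuming entries holds each revision's (logical start, length) as kept in the index; the logical offsets are already correct for the separate data file:

    import struct

    recsize = struct.calcsize(">Qiiiiii20s12x")

    def split_inline(inline, entries):
        out = []
        for r, (start, length) in enumerate(entries):
            phys = start + (r + 1) * recsize   # physical spot in the .i file
            out.append(inline[phys:phys + length])
        return "".join(out)                    # contents of the future .d file
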
646 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
712 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
647 """add a revision to the log
713 """add a revision to the log
648
714
649 text - the revision data to add
715 text - the revision data to add
650 transaction - the transaction object used for rollback
716 transaction - the transaction object used for rollback
651 link - the linkrev data to add
717 link - the linkrev data to add
652 p1, p2 - the parent nodeids of the revision
718 p1, p2 - the parent nodeids of the revision
653 d - an optional precomputed delta
719 d - an optional precomputed delta
654 """
720 """
655 if text is None: text = ""
721 if text is None: text = ""
656 if p1 is None: p1 = self.tip()
722 if p1 is None: p1 = self.tip()
657 if p2 is None: p2 = nullid
723 if p2 is None: p2 = nullid
658
724
659 node = hash(text, p1, p2)
725 node = hash(text, p1, p2)
660
726
661 if node in self.nodemap:
727 if node in self.nodemap:
662 return node
728 return node
663
729
664 n = self.count()
730 n = self.count()
665 t = n - 1
731 t = n - 1
666
732
667 if n:
733 if n:
668 base = self.base(t)
734 base = self.base(t)
669 start = self.start(base)
735 start = self.start(base)
670 end = self.end(t)
736 end = self.end(t)
671 if not d:
737 if not d:
672 prev = self.revision(self.tip())
738 prev = self.revision(self.tip())
673 d = self.diff(prev, str(text))
739 d = self.diff(prev, str(text))
674 data = compress(d)
740 data = compress(d)
675 l = len(data[1]) + len(data[0])
741 l = len(data[1]) + len(data[0])
676 dist = end - start + l
742 dist = end - start + l
677
743
678 # full versions are inserted when the needed deltas
744 # full versions are inserted when the needed deltas
679 # become comparable to the uncompressed text
745 # become comparable to the uncompressed text
680 if not n or dist > len(text) * 2:
746 if not n or dist > len(text) * 2:
681 data = compress(text)
747 data = compress(text)
682 l = len(data[1]) + len(data[0])
748 l = len(data[1]) + len(data[0])
683 base = n
749 base = n
684 else:
750 else:
685 base = self.base(t)
751 base = self.base(t)
686
752
687 offset = 0
753 offset = 0
688 if t >= 0:
754 if t >= 0:
689 offset = self.end(t)
755 offset = self.end(t)
690
756
691 if self.version == 0:
757 if self.version == 0:
692 e = (offset, l, base, link, p1, p2, node)
758 e = (offset, l, base, link, p1, p2, node)
693 else:
759 else:
694 e = (self.offset_type(offset, 0), l, len(text),
760 e = (self.offset_type(offset, 0), l, len(text),
695 base, link, self.rev(p1), self.rev(p2), node)
761 base, link, self.rev(p1), self.rev(p2), node)
696
762
697 self.index.append(e)
763 self.index.append(e)
698 self.nodemap[node] = n
764 self.nodemap[node] = n
699 entry = struct.pack(self.indexformat, *e)
765 entry = struct.pack(self.indexformat, *e)
700
766
701 transaction.add(self.datafile, offset)
767 if not self.inlinedata():
702 transaction.add(self.indexfile, n * len(entry))
768 transaction.add(self.datafile, offset)
703 f = self.opener(self.datafile, "a")
769 transaction.add(self.indexfile, n * len(entry))
704 if data[0]:
770 f = self.opener(self.datafile, "a")
705 f.write(data[0])
771 if data[0]:
706 f.write(data[1])
772 f.write(data[0])
707 f = self.opener(self.indexfile, "a")
773 f.write(data[1])
774 f = self.opener(self.indexfile, "a")
775 else:
776 f = self.opener(self.indexfile, "a+")
777 transaction.add(self.indexfile, f.tell())
708
778
709 if len(self.index) == 1 and self.version != 0:
779 if len(self.index) == 1 and self.version != 0:
710 l = struct.pack(versionformat, self.version)
780 l = struct.pack(versionformat, self.version)
711 f.write(l)
781 f.write(l)
712 entry = entry[4:]
782 entry = entry[4:]
713
783
714 f.write(entry)
784 f.write(entry)
715
785
786 if self.inlinedata():
787 f.write(data[0])
788 f.write(data[1])
789 self.checkinlinesize(f, transaction)
790
716 self.cache = (node, n, text)
791 self.cache = (node, n, text)
717 return node
792 return node
718
793
719 def ancestor(self, a, b):
794 def ancestor(self, a, b):
720 """calculate the least common ancestor of nodes a and b"""
795 """calculate the least common ancestor of nodes a and b"""
721 # calculate the distance of every node from root
796 # calculate the distance of every node from root
722 dist = {nullid: 0}
797 dist = {nullid: 0}
723 for i in xrange(self.count()):
798 for i in xrange(self.count()):
724 n = self.node(i)
799 n = self.node(i)
725 p1, p2 = self.parents(n)
800 p1, p2 = self.parents(n)
726 dist[n] = max(dist[p1], dist[p2]) + 1
801 dist[n] = max(dist[p1], dist[p2]) + 1
727
802
728 # traverse ancestors in order of decreasing distance from root
803 # traverse ancestors in order of decreasing distance from root
729 def ancestors(node):
804 def ancestors(node):
730 # we store negative distances because heap returns smallest member
805 # we store negative distances because heap returns smallest member
731 h = [(-dist[node], node)]
806 h = [(-dist[node], node)]
732 seen = {}
807 seen = {}
733 while h:
808 while h:
734 d, n = heapq.heappop(h)
809 d, n = heapq.heappop(h)
735 if n not in seen:
810 if n not in seen:
736 seen[n] = 1
811 seen[n] = 1
737 yield (-d, n)
812 yield (-d, n)
738 for p in self.parents(n):
813 for p in self.parents(n):
739 heapq.heappush(h, (-dist[p], p))
814 heapq.heappush(h, (-dist[p], p))
740
815
741 def generations(node):
816 def generations(node):
742 sg, s = None, {}
817 sg, s = None, {}
743 for g,n in ancestors(node):
818 for g,n in ancestors(node):
744 if g != sg:
819 if g != sg:
745 if sg:
820 if sg:
746 yield sg, s
821 yield sg, s
747 sg, s = g, {n:1}
822 sg, s = g, {n:1}
748 else:
823 else:
749 s[n] = 1
824 s[n] = 1
750 yield sg, s
825 yield sg, s
751
826
752 x = generations(a)
827 x = generations(a)
753 y = generations(b)
828 y = generations(b)
754 gx = x.next()
829 gx = x.next()
755 gy = y.next()
830 gy = y.next()
756
831
757 # increment each ancestor list until it is closer to root than
832 # increment each ancestor list until it is closer to root than
758 # the other, or they match
833 # the other, or they match
759 while 1:
834 while 1:
760 #print "ancestor gen %s %s" % (gx[0], gy[0])
835 #print "ancestor gen %s %s" % (gx[0], gy[0])
761 if gx[0] == gy[0]:
836 if gx[0] == gy[0]:
762 # find the intersection
837 # find the intersection
763 i = [ n for n in gx[1] if n in gy[1] ]
838 i = [ n for n in gx[1] if n in gy[1] ]
764 if i:
839 if i:
765 return i[0]
840 return i[0]
766 else:
841 else:
767 #print "next"
842 #print "next"
768 gy = y.next()
843 gy = y.next()
769 gx = x.next()
844 gx = x.next()
770 elif gx[0] < gy[0]:
845 elif gx[0] < gy[0]:
771 #print "next y"
846 #print "next y"
772 gy = y.next()
847 gy = y.next()
773 else:
848 else:
774 #print "next x"
849 #print "next x"
775 gx = x.next()
850 gx = x.next()
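
A toy run of the same idea on a parents-map DAG: rank every node by its distance from the root, then the least common ancestor is the deepest node common to both ancestor sets (a sketch, not the heap-based traversal above):

    parents = {"root": [], "a": ["root"], "b": ["a"], "c": ["a"]}
    dist = {"root": 0, "a": 1, "b": 2, "c": 2}

    def ancset(n):
        seen, stack = {n: 1}, [n]
        while stack:
            for p in parents[stack.pop()]:
                seen[p] = 1
                stack.append(p)
        return seen

    common = [n for n in ancset("b") if n in ancset("c")]
    common.sort(lambda x, y: cmp(dist[y], dist[x]))   # deepest first
    assert common[0] == "a"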
776
851
777 def group(self, nodelist, lookup, infocollect=None):
852 def group(self, nodelist, lookup, infocollect=None):
778 """calculate a delta group
853 """calculate a delta group
779
854
780 Given a list of changeset revs, return a set of deltas and
855 Given a list of changeset revs, return a set of deltas and
781 metadata corresponding to nodes. the first delta is
856 metadata corresponding to nodes. the first delta is
782 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
857 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
783 have this parent as it has all history before these
858 have this parent as it has all history before these
784 changesets. parent is parent[0]
859 changesets. parent is parent[0]
785 """
860 """
786 revs = [self.rev(n) for n in nodelist]
861 revs = [self.rev(n) for n in nodelist]
787
862
788 # if we don't have any revisions touched by these changesets, bail
863 # if we don't have any revisions touched by these changesets, bail
789 if not revs:
864 if not revs:
790 yield changegroup.closechunk()
865 yield changegroup.closechunk()
791 return
866 return
792
867
793 # add the parent of the first rev
868 # add the parent of the first rev
794 p = self.parents(self.node(revs[0]))[0]
869 p = self.parents(self.node(revs[0]))[0]
795 revs.insert(0, self.rev(p))
870 revs.insert(0, self.rev(p))
796
871
797 # build deltas
872 # build deltas
798 for d in xrange(0, len(revs) - 1):
873 for d in xrange(0, len(revs) - 1):
799 a, b = revs[d], revs[d + 1]
874 a, b = revs[d], revs[d + 1]
800 nb = self.node(b)
875 nb = self.node(b)
801
876
802 if infocollect is not None:
877 if infocollect is not None:
803 infocollect(nb)
878 infocollect(nb)
804
879
805 d = self.revdiff(a, b)
880 d = self.revdiff(a, b)
806 p = self.parents(nb)
881 p = self.parents(nb)
807 meta = nb + p[0] + p[1] + lookup(nb)
882 meta = nb + p[0] + p[1] + lookup(nb)
808 yield changegroup.genchunk("%s%s" % (meta, d))
883 yield changegroup.genchunk("%s%s" % (meta, d))
809
884
810 yield changegroup.closechunk()
885 yield changegroup.closechunk()
811
886
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = measure = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        transaction.add(self.indexfile, ifh.tell())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #     raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                if self.version == 0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), -1, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(ifh, transaction)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    if not dfh:
                        # addrevision switched from inline to conventional;
                        # reopen the data and index files
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        if node is None:
            raise RevlogError(_("group to be added is empty"))
        return node

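The chain-cutting test in addgroup is easy to miss in passing: a full text is stored whenever the incoming delta does not extend the current chain, or the chain plus the compressed delta would exceed twice the size of the last full version. A standalone restatement of that test; the function name and the sample sizes are illustrative only, not part of revlog:

def needs_full_version(extends_chain, chain_bytes, cdelta_bytes, last_full_bytes):
    # mirrors "chain != prev or (end - start + len(cdelta)) > measure * 2"
    return (not extends_chain or
            (chain_bytes + cdelta_bytes) > last_full_bytes * 2)

# a 1 KB compressed delta arriving on a 30 KB chain whose last full
# text was 14 KB trips the limit: 31744 > 28672
print(needs_full_version(True, 30 * 1024, 1024, 14 * 1024))  # True
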
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

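With inline data the index entries and their data chunks share the .i file, which is why strip computes the truncation point differently in the two layouts. A small sketch of that offset arithmetic, assuming entry_size stands in for struct.calcsize(self.indexformat) and data_start for self.start(rev):

def truncation_points(data_start, rev, entry_size, inline):
    # separate layout: cut the data file where rev's data would begin,
    # and the index file after rev fixed-size entries
    if not inline:
        return {"datafile": data_start, "indexfile": rev * entry_size}
    # inline layout: the surviving rev entries and their data_start bytes
    # of data all live in the index file, so only it is truncated
    return {"indexfile": data_start + rev * entry_size}
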
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
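A caller reads the pair as excess bytes: dd for the data file and di for the index, both zero when the on-disk sizes agree with what the index implies. A hypothetical verification loop; the revlogs argument is a stand-in for however the caller gathers a repository's changelog, manifest and filelogs:

def report(revlogs):
    # print one line per revlog whose on-disk size disagrees with its index
    for rlog in revlogs:
        dd, di = rlog.checksize()
        if dd:
            print("%s: data file is %d bytes off" % (rlog.datafile, dd))
        if di:
            print("%s: index file is %d bytes off" % (rlog.indexfile, di))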