##// END OF EJS Templates
Don't die calling outgoing hook if we have no changesets
Matt Mackall -
r2107:7ff92c04 default
parent child Browse files
Show More
@@ -1,1977 +1,1979 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
15
15
16 class localrepository(object):
16 class localrepository(object):
    def __del__(self):
        # Drop the reference to any open transaction so it can be
        # garbage-collected (and rolled back/aborted) with the repo object.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) a local repository.

        parentui -- ui object the repo's own ui is derived from
        path     -- repository root; when None, search upward from the
                    current directory for a '.hg' directory
        create   -- when true, create the '.hg' and '.hg/data' directories

        Raises repo.RepoError if no repository is found.
        """
        if not path:
            # walk up from cwd until a directory containing '.hg' is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path  # path exactly as given/found, for messages
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repo without an hgrc is perfectly valid
            pass

        # revlog format version and flags come from the [revlog] config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', 0))
        flags = 0
        for x in v.get('flags', "").split():
            flags |= revlog.flagstr(x)

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
74 def hook(self, name, throw=False, **args):
74 def hook(self, name, throw=False, **args):
75 def runhook(name, cmd):
75 def runhook(name, cmd):
76 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
76 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
77 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
77 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
78 [(k.upper(), v) for k, v in args.iteritems()])
78 [(k.upper(), v) for k, v in args.iteritems()])
79 r = util.system(cmd, environ=env, cwd=self.root)
79 r = util.system(cmd, environ=env, cwd=self.root)
80 if r:
80 if r:
81 desc, r = util.explain_exit(r)
81 desc, r = util.explain_exit(r)
82 if throw:
82 if throw:
83 raise util.Abort(_('%s hook %s') % (name, desc))
83 raise util.Abort(_('%s hook %s') % (name, desc))
84 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
84 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
85 return False
85 return False
86 return True
86 return True
87
87
88 r = True
88 r = True
89 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
89 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
90 if hname.split(".", 1)[0] == name and cmd]
90 if hname.split(".", 1)[0] == name and cmd]
91 hooks.sort()
91 hooks.sort()
92 for hname, cmd in hooks:
92 for hname, cmd in hooks:
93 r = runhook(hname, cmd) and r
93 r = runhook(hname, cmd) and r
94 return r
94 return r
95
95
96 def tags(self):
96 def tags(self):
97 '''return a mapping of tag to node'''
97 '''return a mapping of tag to node'''
98 if not self.tagscache:
98 if not self.tagscache:
99 self.tagscache = {}
99 self.tagscache = {}
100
100
101 def parsetag(line, context):
101 def parsetag(line, context):
102 if not line:
102 if not line:
103 return
103 return
104 s = l.split(" ", 1)
104 s = l.split(" ", 1)
105 if len(s) != 2:
105 if len(s) != 2:
106 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
106 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
107 return
107 return
108 node, key = s
108 node, key = s
109 try:
109 try:
110 bin_n = bin(node)
110 bin_n = bin(node)
111 except TypeError:
111 except TypeError:
112 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
112 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
113 return
113 return
114 if bin_n not in self.changelog.nodemap:
114 if bin_n not in self.changelog.nodemap:
115 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
115 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
116 return
116 return
117 self.tagscache[key.strip()] = bin_n
117 self.tagscache[key.strip()] = bin_n
118
118
119 # read each head of the tags file, ending with the tip
119 # read each head of the tags file, ending with the tip
120 # and add each tag found to the map, with "newer" ones
120 # and add each tag found to the map, with "newer" ones
121 # taking precedence
121 # taking precedence
122 fl = self.file(".hgtags")
122 fl = self.file(".hgtags")
123 h = fl.heads()
123 h = fl.heads()
124 h.reverse()
124 h.reverse()
125 for r in h:
125 for r in h:
126 count = 0
126 count = 0
127 for l in fl.read(r).splitlines():
127 for l in fl.read(r).splitlines():
128 count += 1
128 count += 1
129 parsetag(l, ".hgtags:%d" % count)
129 parsetag(l, ".hgtags:%d" % count)
130
130
131 try:
131 try:
132 f = self.opener("localtags")
132 f = self.opener("localtags")
133 count = 0
133 count = 0
134 for l in f:
134 for l in f:
135 count += 1
135 count += 1
136 parsetag(l, "localtags:%d" % count)
136 parsetag(l, "localtags:%d" % count)
137 except IOError:
137 except IOError:
138 pass
138 pass
139
139
140 self.tagscache['tip'] = self.changelog.tip()
140 self.tagscache['tip'] = self.changelog.tip()
141
141
142 return self.tagscache
142 return self.tagscache
143
143
144 def tagslist(self):
144 def tagslist(self):
145 '''return a list of tags ordered by revision'''
145 '''return a list of tags ordered by revision'''
146 l = []
146 l = []
147 for t, n in self.tags().items():
147 for t, n in self.tags().items():
148 try:
148 try:
149 r = self.changelog.rev(n)
149 r = self.changelog.rev(n)
150 except:
150 except:
151 r = -2 # sort to the beginning of the list if unknown
151 r = -2 # sort to the beginning of the list if unknown
152 l.append((r, t, n))
152 l.append((r, t, n))
153 l.sort()
153 l.sort()
154 return [(t, n) for r, t, n in l]
154 return [(t, n) for r, t, n in l]
155
155
156 def nodetags(self, node):
156 def nodetags(self, node):
157 '''return the tags associated with a node'''
157 '''return the tags associated with a node'''
158 if not self.nodetagscache:
158 if not self.nodetagscache:
159 self.nodetagscache = {}
159 self.nodetagscache = {}
160 for t, n in self.tags().items():
160 for t, n in self.tags().items():
161 self.nodetagscache.setdefault(n, []).append(t)
161 self.nodetagscache.setdefault(n, []).append(t)
162 return self.nodetagscache.get(node, [])
162 return self.nodetagscache.get(node, [])
163
163
164 def lookup(self, key):
164 def lookup(self, key):
165 try:
165 try:
166 return self.tags()[key]
166 return self.tags()[key]
167 except KeyError:
167 except KeyError:
168 try:
168 try:
169 return self.changelog.lookup(key)
169 return self.changelog.lookup(key)
170 except:
170 except:
171 raise repo.RepoError(_("unknown revision '%s'") % key)
171 raise repo.RepoError(_("unknown revision '%s'") % key)
172
172
    def dev(self):
        # device number of the filesystem holding .hg; used to tell
        # whether two repo paths live on the same device
        return os.stat(self.path).st_dev
175
175
    def local(self):
        # this is a local (on-disk) repository, as opposed to a remote one
        return True
178
178
    def join(self, f):
        """Return the path of f relative to the .hg directory."""
        return os.path.join(self.path, f)
181
181
    def wjoin(self, f):
        """Return the path of f relative to the working directory root."""
        return os.path.join(self.root, f)
184
184
185 def file(self, f):
185 def file(self, f):
186 if f[0] == '/':
186 if f[0] == '/':
187 f = f[1:]
187 f = f[1:]
188 return filelog.filelog(self.opener, f, self.revlogversion)
188 return filelog.filelog(self.opener, f, self.revlogversion)
189
189
    def getcwd(self):
        """Return the current working directory relative to the repo root."""
        return self.dirstate.getcwd()
192
192
    def wfile(self, f, mode='r'):
        """Open file f from the working directory with the given mode."""
        return self.wopener(f, mode)
195
195
196 def wread(self, filename):
196 def wread(self, filename):
197 if self.encodepats == None:
197 if self.encodepats == None:
198 l = []
198 l = []
199 for pat, cmd in self.ui.configitems("encode"):
199 for pat, cmd in self.ui.configitems("encode"):
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
201 l.append((mf, cmd))
201 l.append((mf, cmd))
202 self.encodepats = l
202 self.encodepats = l
203
203
204 data = self.wopener(filename, 'r').read()
204 data = self.wopener(filename, 'r').read()
205
205
206 for mf, cmd in self.encodepats:
206 for mf, cmd in self.encodepats:
207 if mf(filename):
207 if mf(filename):
208 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
208 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
209 data = util.filter(data, cmd)
209 data = util.filter(data, cmd)
210 break
210 break
211
211
212 return data
212 return data
213
213
214 def wwrite(self, filename, data, fd=None):
214 def wwrite(self, filename, data, fd=None):
215 if self.decodepats == None:
215 if self.decodepats == None:
216 l = []
216 l = []
217 for pat, cmd in self.ui.configitems("decode"):
217 for pat, cmd in self.ui.configitems("decode"):
218 mf = util.matcher(self.root, "", [pat], [], [])[1]
218 mf = util.matcher(self.root, "", [pat], [], [])[1]
219 l.append((mf, cmd))
219 l.append((mf, cmd))
220 self.decodepats = l
220 self.decodepats = l
221
221
222 for mf, cmd in self.decodepats:
222 for mf, cmd in self.decodepats:
223 if mf(filename):
223 if mf(filename):
224 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
224 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
225 data = util.filter(data, cmd)
225 data = util.filter(data, cmd)
226 break
226 break
227
227
228 if fd:
228 if fd:
229 return fd.write(data)
229 return fd.write(data)
230 return self.wopener(filename, 'w').write(data)
230 return self.wopener(filename, 'w').write(data)
231
231
232 def transaction(self):
232 def transaction(self):
233 tr = self.transhandle
233 tr = self.transhandle
234 if tr != None and tr.running():
234 if tr != None and tr.running():
235 return tr.nest()
235 return tr.nest()
236
236
237 # save dirstate for undo
237 # save dirstate for undo
238 try:
238 try:
239 ds = self.opener("dirstate").read()
239 ds = self.opener("dirstate").read()
240 except IOError:
240 except IOError:
241 ds = ""
241 ds = ""
242 self.opener("journal.dirstate", "w").write(ds)
242 self.opener("journal.dirstate", "w").write(ds)
243
243
244 tr = transaction.transaction(self.ui.warn, self.opener,
244 tr = transaction.transaction(self.ui.warn, self.opener,
245 self.join("journal"),
245 self.join("journal"),
246 aftertrans(self.path))
246 aftertrans(self.path))
247 self.transhandle = tr
247 self.transhandle = tr
248 return tr
248 return tr
249
249
250 def recover(self):
250 def recover(self):
251 l = self.lock()
251 l = self.lock()
252 if os.path.exists(self.join("journal")):
252 if os.path.exists(self.join("journal")):
253 self.ui.status(_("rolling back interrupted transaction\n"))
253 self.ui.status(_("rolling back interrupted transaction\n"))
254 transaction.rollback(self.opener, self.join("journal"))
254 transaction.rollback(self.opener, self.join("journal"))
255 self.reload()
255 self.reload()
256 return True
256 return True
257 else:
257 else:
258 self.ui.warn(_("no interrupted transaction available\n"))
258 self.ui.warn(_("no interrupted transaction available\n"))
259 return False
259 return False
260
260
261 def undo(self, wlock=None):
261 def undo(self, wlock=None):
262 if not wlock:
262 if not wlock:
263 wlock = self.wlock()
263 wlock = self.wlock()
264 l = self.lock()
264 l = self.lock()
265 if os.path.exists(self.join("undo")):
265 if os.path.exists(self.join("undo")):
266 self.ui.status(_("rolling back last transaction\n"))
266 self.ui.status(_("rolling back last transaction\n"))
267 transaction.rollback(self.opener, self.join("undo"))
267 transaction.rollback(self.opener, self.join("undo"))
268 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
268 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
269 self.reload()
269 self.reload()
270 self.wreload()
270 self.wreload()
271 else:
271 else:
272 self.ui.warn(_("no undo information available\n"))
272 self.ui.warn(_("no undo information available\n"))
273
273
    def wreload(self):
        # re-read the dirstate from disk (working-dir state changed)
        self.dirstate.read()
276
276
    def reload(self):
        """Re-read store data from disk and invalidate tag caches."""
        self.changelog.load()
        self.manifest.load()
        # tag caches are derived from the changelog, so drop them too
        self.tagscache = None
        self.nodetagscache = None
282
282
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file under .hg.

        lockname  -- lock file name, e.g. "lock" or "wlock"
        wait      -- when false, a held lock raises immediately;
                     otherwise retry with a timeout
        releasefn -- callback run when the lock is released
        acquirefn -- callback run after the lock is acquired
        desc      -- human-readable description used in messages

        Returns the lock object; dropping it releases the lock.
        """
        try:
            # first attempt: zero timeout, fail fast if already held
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
299
299
    def lock(self, wait=1):
        """Acquire the repository (store) lock; reloads store data on acquire."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
303
303
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        Writes the dirstate on release and re-reads it on acquire.
        """
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
308
308
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # returns (existing_entry, None, None) when the file is unchanged,
        # or (None, fp1, fp2) giving the parents for a new filelog entry
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only the newer one as parent
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
327
327
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files with explicit parents, bypassing the
        usual status checks (used e.g. by import/convert-style operations).

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when p1 is still the original first dirstate parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                # reuse an existing filelog entry if the file is unchanged
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working dir: record it as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
383
383
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit changes to the repository.

        files -- explicit list of files to commit; when empty/None, commit
                 everything reported changed by self.changes(match=match)
        text  -- commit message; when empty, the user's editor is invoked
        force -- commit even when nothing changed
        Returns the new changeset node, or None if nothing was committed.
        Runs the precommit, pretxncommit and commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify each named file by its dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 != nullid) may legitimately change no files
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            # record copy metadata if the dirstate says f was copied
            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged: reuse the existing filelog entry
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # no message supplied: build an editor template and prompt
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            # NOTE: aborting here leaves the transaction to be rolled back
            # by its destructor/aftertrans handling
            if not edittext.rstrip():
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
503
503
504 def walk(self, node=None, files=[], match=util.always, badmatch=None):
504 def walk(self, node=None, files=[], match=util.always, badmatch=None):
505 if node:
505 if node:
506 fdict = dict.fromkeys(files)
506 fdict = dict.fromkeys(files)
507 for fn in self.manifest.read(self.changelog.read(node)[0]):
507 for fn in self.manifest.read(self.changelog.read(node)[0]):
508 fdict.pop(fn, None)
508 fdict.pop(fn, None)
509 if match(fn):
509 if match(fn):
510 yield 'm', fn
510 yield 'm', fn
511 for fn in fdict:
511 for fn in fdict:
512 if badmatch and badmatch(fn):
512 if badmatch and badmatch(fn):
513 if match(fn):
513 if match(fn):
514 yield 'b', fn
514 yield 'b', fn
515 else:
515 else:
516 self.ui.warn(_('%s: No such file in rev %s\n') % (
516 self.ui.warn(_('%s: No such file in rev %s\n') % (
517 util.pathto(self.getcwd(), fn), short(node)))
517 util.pathto(self.getcwd(), fn), short(node)))
518 else:
518 else:
519 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
519 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
520 yield src, fn
520 yield src, fn
521
521
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown) lists, plus
        an `ignored` list when show_ignored is not None.
        """

        def fcmp(fn, mf):
            # compare working-dir contents of fn against its stored revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best effort: without the wlock we still report changes,
                # we just cannot update the dirstate below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents identical: refresh the dirstate entry
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    # "" marks a working-dir pseudo-entry, which needs a
                    # real content comparison
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was removed between node1 and node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
604
604
605 def add(self, list, wlock=None):
605 def add(self, list, wlock=None):
606 if not wlock:
606 if not wlock:
607 wlock = self.wlock()
607 wlock = self.wlock()
608 for f in list:
608 for f in list:
609 p = self.wjoin(f)
609 p = self.wjoin(f)
610 if not os.path.exists(p):
610 if not os.path.exists(p):
611 self.ui.warn(_("%s does not exist!\n") % f)
611 self.ui.warn(_("%s does not exist!\n") % f)
612 elif not os.path.isfile(p):
612 elif not os.path.isfile(p):
613 self.ui.warn(_("%s not added: only files supported currently\n")
613 self.ui.warn(_("%s not added: only files supported currently\n")
614 % f)
614 % f)
615 elif self.dirstate.state(f) in 'an':
615 elif self.dirstate.state(f) in 'an':
616 self.ui.warn(_("%s already tracked!\n") % f)
616 self.ui.warn(_("%s already tracked!\n") % f)
617 else:
617 else:
618 self.dirstate.update([f], "a")
618 self.dirstate.update([f], "a")
619
619
620 def forget(self, list, wlock=None):
620 def forget(self, list, wlock=None):
621 if not wlock:
621 if not wlock:
622 wlock = self.wlock()
622 wlock = self.wlock()
623 for f in list:
623 for f in list:
624 if self.dirstate.state(f) not in 'ai':
624 if self.dirstate.state(f) not in 'ai':
625 self.ui.warn(_("%s not added!\n") % f)
625 self.ui.warn(_("%s not added!\n") % f)
626 else:
626 else:
627 self.dirstate.forget([f])
627 self.dirstate.forget([f])
628
628
629 def remove(self, list, unlink=False, wlock=None):
629 def remove(self, list, unlink=False, wlock=None):
630 if unlink:
630 if unlink:
631 for f in list:
631 for f in list:
632 try:
632 try:
633 util.unlink(self.wjoin(f))
633 util.unlink(self.wjoin(f))
634 except OSError, inst:
634 except OSError, inst:
635 if inst.errno != errno.ENOENT:
635 if inst.errno != errno.ENOENT:
636 raise
636 raise
637 if not wlock:
637 if not wlock:
638 wlock = self.wlock()
638 wlock = self.wlock()
639 for f in list:
639 for f in list:
640 p = self.wjoin(f)
640 p = self.wjoin(f)
641 if os.path.exists(p):
641 if os.path.exists(p):
642 self.ui.warn(_("%s still exists!\n") % f)
642 self.ui.warn(_("%s still exists!\n") % f)
643 elif self.dirstate.state(f) == 'a':
643 elif self.dirstate.state(f) == 'a':
644 self.dirstate.forget([f])
644 self.dirstate.forget([f])
645 elif f not in self.dirstate:
645 elif f not in self.dirstate:
646 self.ui.warn(_("%s not tracked!\n") % f)
646 self.ui.warn(_("%s not tracked!\n") % f)
647 else:
647 else:
648 self.dirstate.update([f], "r")
648 self.dirstate.update([f], "r")
649
649
650 def undelete(self, list, wlock=None):
650 def undelete(self, list, wlock=None):
651 p = self.dirstate.parents()[0]
651 p = self.dirstate.parents()[0]
652 mn = self.changelog.read(p)[0]
652 mn = self.changelog.read(p)[0]
653 mf = self.manifest.readflags(mn)
653 mf = self.manifest.readflags(mn)
654 m = self.manifest.read(mn)
654 m = self.manifest.read(mn)
655 if not wlock:
655 if not wlock:
656 wlock = self.wlock()
656 wlock = self.wlock()
657 for f in list:
657 for f in list:
658 if self.dirstate.state(f) not in "r":
658 if self.dirstate.state(f) not in "r":
659 self.ui.warn("%s not removed!\n" % f)
659 self.ui.warn("%s not removed!\n" % f)
660 else:
660 else:
661 t = self.file(f).read(m[f])
661 t = self.file(f).read(m[f])
662 self.wwrite(f, t)
662 self.wwrite(f, t)
663 util.set_exec(self.wjoin(f), mf[f])
663 util.set_exec(self.wjoin(f), mf[f])
664 self.dirstate.update([f], "n")
664 self.dirstate.update([f], "n")
665
665
666 def copy(self, source, dest, wlock=None):
666 def copy(self, source, dest, wlock=None):
667 p = self.wjoin(dest)
667 p = self.wjoin(dest)
668 if not os.path.exists(p):
668 if not os.path.exists(p):
669 self.ui.warn(_("%s does not exist!\n") % dest)
669 self.ui.warn(_("%s does not exist!\n") % dest)
670 elif not os.path.isfile(p):
670 elif not os.path.isfile(p):
671 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
671 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
672 else:
672 else:
673 if not wlock:
673 if not wlock:
674 wlock = self.wlock()
674 wlock = self.wlock()
675 if self.dirstate.state(dest) == '?':
675 if self.dirstate.state(dest) == '?':
676 self.dirstate.update([dest], "a")
676 self.dirstate.update([dest], "a")
677 self.dirstate.copy(source, dest)
677 self.dirstate.copy(source, dest)
678
678
679 def heads(self, start=None):
679 def heads(self, start=None):
680 heads = self.changelog.heads(start)
680 heads = self.changelog.heads(start)
681 # sort the output in rev descending order
681 # sort the output in rev descending order
682 heads = [(-self.changelog.rev(h), h) for h in heads]
682 heads = [(-self.changelog.rev(h), h) for h in heads]
683 heads.sort()
683 heads.sort()
684 return [n for (r, n) in heads]
684 return [n for (r, n) in heads]
685
685
686 # branchlookup returns a dict giving a list of branches for
686 # branchlookup returns a dict giving a list of branches for
687 # each head. A branch is defined as the tag of a node or
687 # each head. A branch is defined as the tag of a node or
688 # the branch of the node's parents. If a node has multiple
688 # the branch of the node's parents. If a node has multiple
689 # branch tags, tags are eliminated if they are visible from other
689 # branch tags, tags are eliminated if they are visible from other
690 # branch tags.
690 # branch tags.
691 #
691 #
692 # So, for this graph: a->b->c->d->e
692 # So, for this graph: a->b->c->d->e
693 # \ /
693 # \ /
694 # aa -----/
694 # aa -----/
695 # a has tag 2.6.12
695 # a has tag 2.6.12
696 # d has tag 2.6.13
696 # d has tag 2.6.13
697 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
697 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
698 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
698 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
699 # from the list.
699 # from the list.
700 #
700 #
701 # It is possible that more than one head will have the same branch tag.
701 # It is possible that more than one head will have the same branch tag.
702 # callers need to check the result for multiple heads under the same
702 # callers need to check the result for multiple heads under the same
703 # branch tag if that is a problem for them (ie checkout of a specific
703 # branch tag if that is a problem for them (ie checkout of a specific
704 # branch).
704 # branch).
705 #
705 #
706 # passing in a specific branch will limit the depth of the search
706 # passing in a specific branch will limit the depth of the search
707 # through the parents. It won't limit the branches returned in the
707 # through the parents. It won't limit the branches returned in the
708 # result though.
708 # result though.
709 def branchlookup(self, heads=None, branch=None):
709 def branchlookup(self, heads=None, branch=None):
710 if not heads:
710 if not heads:
711 heads = self.heads()
711 heads = self.heads()
712 headt = [ h for h in heads ]
712 headt = [ h for h in heads ]
713 chlog = self.changelog
713 chlog = self.changelog
714 branches = {}
714 branches = {}
715 merges = []
715 merges = []
716 seenmerge = {}
716 seenmerge = {}
717
717
718 # traverse the tree once for each head, recording in the branches
718 # traverse the tree once for each head, recording in the branches
719 # dict which tags are visible from this head. The branches
719 # dict which tags are visible from this head. The branches
720 # dict also records which tags are visible from each tag
720 # dict also records which tags are visible from each tag
721 # while we traverse.
721 # while we traverse.
722 while headt or merges:
722 while headt or merges:
723 if merges:
723 if merges:
724 n, found = merges.pop()
724 n, found = merges.pop()
725 visit = [n]
725 visit = [n]
726 else:
726 else:
727 h = headt.pop()
727 h = headt.pop()
728 visit = [h]
728 visit = [h]
729 found = [h]
729 found = [h]
730 seen = {}
730 seen = {}
731 while visit:
731 while visit:
732 n = visit.pop()
732 n = visit.pop()
733 if n in seen:
733 if n in seen:
734 continue
734 continue
735 pp = chlog.parents(n)
735 pp = chlog.parents(n)
736 tags = self.nodetags(n)
736 tags = self.nodetags(n)
737 if tags:
737 if tags:
738 for x in tags:
738 for x in tags:
739 if x == 'tip':
739 if x == 'tip':
740 continue
740 continue
741 for f in found:
741 for f in found:
742 branches.setdefault(f, {})[n] = 1
742 branches.setdefault(f, {})[n] = 1
743 branches.setdefault(n, {})[n] = 1
743 branches.setdefault(n, {})[n] = 1
744 break
744 break
745 if n not in found:
745 if n not in found:
746 found.append(n)
746 found.append(n)
747 if branch in tags:
747 if branch in tags:
748 continue
748 continue
749 seen[n] = 1
749 seen[n] = 1
750 if pp[1] != nullid and n not in seenmerge:
750 if pp[1] != nullid and n not in seenmerge:
751 merges.append((pp[1], [x for x in found]))
751 merges.append((pp[1], [x for x in found]))
752 seenmerge[n] = 1
752 seenmerge[n] = 1
753 if pp[0] != nullid:
753 if pp[0] != nullid:
754 visit.append(pp[0])
754 visit.append(pp[0])
755 # traverse the branches dict, eliminating branch tags from each
755 # traverse the branches dict, eliminating branch tags from each
756 # head that are visible from another branch tag for that head.
756 # head that are visible from another branch tag for that head.
757 out = {}
757 out = {}
758 viscache = {}
758 viscache = {}
759 for h in heads:
759 for h in heads:
760 def visible(node):
760 def visible(node):
761 if node in viscache:
761 if node in viscache:
762 return viscache[node]
762 return viscache[node]
763 ret = {}
763 ret = {}
764 visit = [node]
764 visit = [node]
765 while visit:
765 while visit:
766 x = visit.pop()
766 x = visit.pop()
767 if x in viscache:
767 if x in viscache:
768 ret.update(viscache[x])
768 ret.update(viscache[x])
769 elif x not in ret:
769 elif x not in ret:
770 ret[x] = 1
770 ret[x] = 1
771 if x in branches:
771 if x in branches:
772 visit[len(visit):] = branches[x].keys()
772 visit[len(visit):] = branches[x].keys()
773 viscache[node] = ret
773 viscache[node] = ret
774 return ret
774 return ret
775 if h not in branches:
775 if h not in branches:
776 continue
776 continue
777 # O(n^2), but somewhat limited. This only searches the
777 # O(n^2), but somewhat limited. This only searches the
778 # tags visible from a specific head, not all the tags in the
778 # tags visible from a specific head, not all the tags in the
779 # whole repo.
779 # whole repo.
780 for b in branches[h]:
780 for b in branches[h]:
781 vis = False
781 vis = False
782 for bb in branches[h].keys():
782 for bb in branches[h].keys():
783 if b != bb:
783 if b != bb:
784 if b in visible(bb):
784 if b in visible(bb):
785 vis = True
785 vis = True
786 break
786 break
787 if not vis:
787 if not vis:
788 l = out.setdefault(h, [])
788 l = out.setdefault(h, [])
789 l[len(l):] = self.nodetags(b)
789 l[len(l):] = self.nodetags(b)
790 return out
790 return out
791
791
792 def branches(self, nodes):
792 def branches(self, nodes):
793 if not nodes:
793 if not nodes:
794 nodes = [self.changelog.tip()]
794 nodes = [self.changelog.tip()]
795 b = []
795 b = []
796 for n in nodes:
796 for n in nodes:
797 t = n
797 t = n
798 while n:
798 while n:
799 p = self.changelog.parents(n)
799 p = self.changelog.parents(n)
800 if p[1] != nullid or p[0] == nullid:
800 if p[1] != nullid or p[0] == nullid:
801 b.append((t, n, p[0], p[1]))
801 b.append((t, n, p[0], p[1]))
802 break
802 break
803 n = p[0]
803 n = p[0]
804 return b
804 return b
805
805
806 def between(self, pairs):
806 def between(self, pairs):
807 r = []
807 r = []
808
808
809 for top, bottom in pairs:
809 for top, bottom in pairs:
810 n, l, i = top, [], 0
810 n, l, i = top, [], 0
811 f = 1
811 f = 1
812
812
813 while n != bottom:
813 while n != bottom:
814 p = self.changelog.parents(n)[0]
814 p = self.changelog.parents(n)[0]
815 if i == f:
815 if i == f:
816 l.append(n)
816 l.append(n)
817 f = f * 2
817 f = f * 2
818 n = p
818 n = p
819 i += 1
819 i += 1
820
820
821 r.append(l)
821 r.append(l)
822
822
823 return r
823 return r
824
824
825 def findincoming(self, remote, base=None, heads=None, force=False):
825 def findincoming(self, remote, base=None, heads=None, force=False):
826 m = self.changelog.nodemap
826 m = self.changelog.nodemap
827 search = []
827 search = []
828 fetch = {}
828 fetch = {}
829 seen = {}
829 seen = {}
830 seenbranch = {}
830 seenbranch = {}
831 if base == None:
831 if base == None:
832 base = {}
832 base = {}
833
833
834 # assume we're closer to the tip than the root
834 # assume we're closer to the tip than the root
835 # and start by examining the heads
835 # and start by examining the heads
836 self.ui.status(_("searching for changes\n"))
836 self.ui.status(_("searching for changes\n"))
837
837
838 if not heads:
838 if not heads:
839 heads = remote.heads()
839 heads = remote.heads()
840
840
841 unknown = []
841 unknown = []
842 for h in heads:
842 for h in heads:
843 if h not in m:
843 if h not in m:
844 unknown.append(h)
844 unknown.append(h)
845 else:
845 else:
846 base[h] = 1
846 base[h] = 1
847
847
848 if not unknown:
848 if not unknown:
849 return []
849 return []
850
850
851 rep = {}
851 rep = {}
852 reqcnt = 0
852 reqcnt = 0
853
853
854 # search through remote branches
854 # search through remote branches
855 # a 'branch' here is a linear segment of history, with four parts:
855 # a 'branch' here is a linear segment of history, with four parts:
856 # head, root, first parent, second parent
856 # head, root, first parent, second parent
857 # (a branch always has two parents (or none) by definition)
857 # (a branch always has two parents (or none) by definition)
858 unknown = remote.branches(unknown)
858 unknown = remote.branches(unknown)
859 while unknown:
859 while unknown:
860 r = []
860 r = []
861 while unknown:
861 while unknown:
862 n = unknown.pop(0)
862 n = unknown.pop(0)
863 if n[0] in seen:
863 if n[0] in seen:
864 continue
864 continue
865
865
866 self.ui.debug(_("examining %s:%s\n")
866 self.ui.debug(_("examining %s:%s\n")
867 % (short(n[0]), short(n[1])))
867 % (short(n[0]), short(n[1])))
868 if n[0] == nullid:
868 if n[0] == nullid:
869 break
869 break
870 if n in seenbranch:
870 if n in seenbranch:
871 self.ui.debug(_("branch already found\n"))
871 self.ui.debug(_("branch already found\n"))
872 continue
872 continue
873 if n[1] and n[1] in m: # do we know the base?
873 if n[1] and n[1] in m: # do we know the base?
874 self.ui.debug(_("found incomplete branch %s:%s\n")
874 self.ui.debug(_("found incomplete branch %s:%s\n")
875 % (short(n[0]), short(n[1])))
875 % (short(n[0]), short(n[1])))
876 search.append(n) # schedule branch range for scanning
876 search.append(n) # schedule branch range for scanning
877 seenbranch[n] = 1
877 seenbranch[n] = 1
878 else:
878 else:
879 if n[1] not in seen and n[1] not in fetch:
879 if n[1] not in seen and n[1] not in fetch:
880 if n[2] in m and n[3] in m:
880 if n[2] in m and n[3] in m:
881 self.ui.debug(_("found new changeset %s\n") %
881 self.ui.debug(_("found new changeset %s\n") %
882 short(n[1]))
882 short(n[1]))
883 fetch[n[1]] = 1 # earliest unknown
883 fetch[n[1]] = 1 # earliest unknown
884 base[n[2]] = 1 # latest known
884 base[n[2]] = 1 # latest known
885 continue
885 continue
886
886
887 for a in n[2:4]:
887 for a in n[2:4]:
888 if a not in rep:
888 if a not in rep:
889 r.append(a)
889 r.append(a)
890 rep[a] = 1
890 rep[a] = 1
891
891
892 seen[n[0]] = 1
892 seen[n[0]] = 1
893
893
894 if r:
894 if r:
895 reqcnt += 1
895 reqcnt += 1
896 self.ui.debug(_("request %d: %s\n") %
896 self.ui.debug(_("request %d: %s\n") %
897 (reqcnt, " ".join(map(short, r))))
897 (reqcnt, " ".join(map(short, r))))
898 for p in range(0, len(r), 10):
898 for p in range(0, len(r), 10):
899 for b in remote.branches(r[p:p+10]):
899 for b in remote.branches(r[p:p+10]):
900 self.ui.debug(_("received %s:%s\n") %
900 self.ui.debug(_("received %s:%s\n") %
901 (short(b[0]), short(b[1])))
901 (short(b[0]), short(b[1])))
902 if b[0] in m:
902 if b[0] in m:
903 self.ui.debug(_("found base node %s\n")
903 self.ui.debug(_("found base node %s\n")
904 % short(b[0]))
904 % short(b[0]))
905 base[b[0]] = 1
905 base[b[0]] = 1
906 elif b[0] not in seen:
906 elif b[0] not in seen:
907 unknown.append(b)
907 unknown.append(b)
908
908
909 # do binary search on the branches we found
909 # do binary search on the branches we found
910 while search:
910 while search:
911 n = search.pop(0)
911 n = search.pop(0)
912 reqcnt += 1
912 reqcnt += 1
913 l = remote.between([(n[0], n[1])])[0]
913 l = remote.between([(n[0], n[1])])[0]
914 l.append(n[1])
914 l.append(n[1])
915 p = n[0]
915 p = n[0]
916 f = 1
916 f = 1
917 for i in l:
917 for i in l:
918 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
918 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
919 if i in m:
919 if i in m:
920 if f <= 2:
920 if f <= 2:
921 self.ui.debug(_("found new branch changeset %s\n") %
921 self.ui.debug(_("found new branch changeset %s\n") %
922 short(p))
922 short(p))
923 fetch[p] = 1
923 fetch[p] = 1
924 base[i] = 1
924 base[i] = 1
925 else:
925 else:
926 self.ui.debug(_("narrowed branch search to %s:%s\n")
926 self.ui.debug(_("narrowed branch search to %s:%s\n")
927 % (short(p), short(i)))
927 % (short(p), short(i)))
928 search.append((p, i))
928 search.append((p, i))
929 break
929 break
930 p, f = i, f * 2
930 p, f = i, f * 2
931
931
932 # sanity check our fetch list
932 # sanity check our fetch list
933 for f in fetch.keys():
933 for f in fetch.keys():
934 if f in m:
934 if f in m:
935 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
935 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
936
936
937 if base.keys() == [nullid]:
937 if base.keys() == [nullid]:
938 if force:
938 if force:
939 self.ui.warn(_("warning: repository is unrelated\n"))
939 self.ui.warn(_("warning: repository is unrelated\n"))
940 else:
940 else:
941 raise util.Abort(_("repository is unrelated"))
941 raise util.Abort(_("repository is unrelated"))
942
942
943 self.ui.note(_("found new changesets starting at ") +
943 self.ui.note(_("found new changesets starting at ") +
944 " ".join([short(f) for f in fetch]) + "\n")
944 " ".join([short(f) for f in fetch]) + "\n")
945
945
946 self.ui.debug(_("%d total queries\n") % reqcnt)
946 self.ui.debug(_("%d total queries\n") % reqcnt)
947
947
948 return fetch.keys()
948 return fetch.keys()
949
949
950 def findoutgoing(self, remote, base=None, heads=None, force=False):
950 def findoutgoing(self, remote, base=None, heads=None, force=False):
951 """Return list of nodes that are roots of subsets not in remote
951 """Return list of nodes that are roots of subsets not in remote
952
952
953 If base dict is specified, assume that these nodes and their parents
953 If base dict is specified, assume that these nodes and their parents
954 exist on the remote side.
954 exist on the remote side.
955 If a list of heads is specified, return only nodes which are heads
955 If a list of heads is specified, return only nodes which are heads
956 or ancestors of these heads, and return a second element which
956 or ancestors of these heads, and return a second element which
957 contains all remote heads which get new children.
957 contains all remote heads which get new children.
958 """
958 """
959 if base == None:
959 if base == None:
960 base = {}
960 base = {}
961 self.findincoming(remote, base, heads, force=force)
961 self.findincoming(remote, base, heads, force=force)
962
962
963 self.ui.debug(_("common changesets up to ")
963 self.ui.debug(_("common changesets up to ")
964 + " ".join(map(short, base.keys())) + "\n")
964 + " ".join(map(short, base.keys())) + "\n")
965
965
966 remain = dict.fromkeys(self.changelog.nodemap)
966 remain = dict.fromkeys(self.changelog.nodemap)
967
967
968 # prune everything remote has from the tree
968 # prune everything remote has from the tree
969 del remain[nullid]
969 del remain[nullid]
970 remove = base.keys()
970 remove = base.keys()
971 while remove:
971 while remove:
972 n = remove.pop(0)
972 n = remove.pop(0)
973 if n in remain:
973 if n in remain:
974 del remain[n]
974 del remain[n]
975 for p in self.changelog.parents(n):
975 for p in self.changelog.parents(n):
976 remove.append(p)
976 remove.append(p)
977
977
978 # find every node whose parents have been pruned
978 # find every node whose parents have been pruned
979 subset = []
979 subset = []
980 # find every remote head that will get new children
980 # find every remote head that will get new children
981 updated_heads = {}
981 updated_heads = {}
982 for n in remain:
982 for n in remain:
983 p1, p2 = self.changelog.parents(n)
983 p1, p2 = self.changelog.parents(n)
984 if p1 not in remain and p2 not in remain:
984 if p1 not in remain and p2 not in remain:
985 subset.append(n)
985 subset.append(n)
986 if heads:
986 if heads:
987 if p1 in heads:
987 if p1 in heads:
988 updated_heads[p1] = True
988 updated_heads[p1] = True
989 if p2 in heads:
989 if p2 in heads:
990 updated_heads[p2] = True
990 updated_heads[p2] = True
991
991
992 # this is the set of all roots we have to push
992 # this is the set of all roots we have to push
993 if heads:
993 if heads:
994 return subset, updated_heads.keys()
994 return subset, updated_heads.keys()
995 else:
995 else:
996 return subset
996 return subset
997
997
998 def pull(self, remote, heads=None, force=False):
998 def pull(self, remote, heads=None, force=False):
999 l = self.lock()
999 l = self.lock()
1000
1000
1001 # if we have an empty repo, fetch everything
1001 # if we have an empty repo, fetch everything
1002 if self.changelog.tip() == nullid:
1002 if self.changelog.tip() == nullid:
1003 self.ui.status(_("requesting all changes\n"))
1003 self.ui.status(_("requesting all changes\n"))
1004 fetch = [nullid]
1004 fetch = [nullid]
1005 else:
1005 else:
1006 fetch = self.findincoming(remote, force=force)
1006 fetch = self.findincoming(remote, force=force)
1007
1007
1008 if not fetch:
1008 if not fetch:
1009 self.ui.status(_("no changes found\n"))
1009 self.ui.status(_("no changes found\n"))
1010 return 0
1010 return 0
1011
1011
1012 if heads is None:
1012 if heads is None:
1013 cg = remote.changegroup(fetch, 'pull')
1013 cg = remote.changegroup(fetch, 'pull')
1014 else:
1014 else:
1015 cg = remote.changegroupsubset(fetch, heads, 'pull')
1015 cg = remote.changegroupsubset(fetch, heads, 'pull')
1016 return self.addchangegroup(cg)
1016 return self.addchangegroup(cg)
1017
1017
1018 def push(self, remote, force=False, revs=None):
1018 def push(self, remote, force=False, revs=None):
1019 lock = remote.lock()
1019 lock = remote.lock()
1020
1020
1021 base = {}
1021 base = {}
1022 remote_heads = remote.heads()
1022 remote_heads = remote.heads()
1023 inc = self.findincoming(remote, base, remote_heads, force=force)
1023 inc = self.findincoming(remote, base, remote_heads, force=force)
1024 if not force and inc:
1024 if not force and inc:
1025 self.ui.warn(_("abort: unsynced remote changes!\n"))
1025 self.ui.warn(_("abort: unsynced remote changes!\n"))
1026 self.ui.status(_("(did you forget to sync?"
1026 self.ui.status(_("(did you forget to sync?"
1027 " use push -f to force)\n"))
1027 " use push -f to force)\n"))
1028 return 1
1028 return 1
1029
1029
1030 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1030 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1031 if revs is not None:
1031 if revs is not None:
1032 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1032 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1033 else:
1033 else:
1034 bases, heads = update, self.changelog.heads()
1034 bases, heads = update, self.changelog.heads()
1035
1035
1036 if not bases:
1036 if not bases:
1037 self.ui.status(_("no changes found\n"))
1037 self.ui.status(_("no changes found\n"))
1038 return 1
1038 return 1
1039 elif not force:
1039 elif not force:
1040 if revs is not None:
1040 if revs is not None:
1041 updated_heads = {}
1041 updated_heads = {}
1042 for base in msng_cl:
1042 for base in msng_cl:
1043 for parent in self.changelog.parents(base):
1043 for parent in self.changelog.parents(base):
1044 if parent in remote_heads:
1044 if parent in remote_heads:
1045 updated_heads[parent] = True
1045 updated_heads[parent] = True
1046 updated_heads = updated_heads.keys()
1046 updated_heads = updated_heads.keys()
1047 if len(updated_heads) < len(heads):
1047 if len(updated_heads) < len(heads):
1048 self.ui.warn(_("abort: push creates new remote branches!\n"))
1048 self.ui.warn(_("abort: push creates new remote branches!\n"))
1049 self.ui.status(_("(did you forget to merge?"
1049 self.ui.status(_("(did you forget to merge?"
1050 " use push -f to force)\n"))
1050 " use push -f to force)\n"))
1051 return 1
1051 return 1
1052
1052
1053 if revs is None:
1053 if revs is None:
1054 cg = self.changegroup(update, 'push')
1054 cg = self.changegroup(update, 'push')
1055 else:
1055 else:
1056 cg = self.changegroupsubset(update, revs, 'push')
1056 cg = self.changegroupsubset(update, revs, 'push')
1057 return remote.addchangegroup(cg)
1057 return remote.addchangegroup(cg)
1058
1058
1059 def changegroupsubset(self, bases, heads, source):
1059 def changegroupsubset(self, bases, heads, source):
1060 """This function generates a changegroup consisting of all the nodes
1060 """This function generates a changegroup consisting of all the nodes
1061 that are descendents of any of the bases, and ancestors of any of
1061 that are descendents of any of the bases, and ancestors of any of
1062 the heads.
1062 the heads.
1063
1063
1064 It is fairly complex as determining which filenodes and which
1064 It is fairly complex as determining which filenodes and which
1065 manifest nodes need to be included for the changeset to be complete
1065 manifest nodes need to be included for the changeset to be complete
1066 is non-trivial.
1066 is non-trivial.
1067
1067
1068 Another wrinkle is doing the reverse, figuring out which changeset in
1068 Another wrinkle is doing the reverse, figuring out which changeset in
1069 the changegroup a particular filenode or manifestnode belongs to."""
1069 the changegroup a particular filenode or manifestnode belongs to."""
1070
1070
1071 self.hook('preoutgoing', throw=True, source=source)
1071 self.hook('preoutgoing', throw=True, source=source)
1072
1072
1073 # Set up some initial variables
1073 # Set up some initial variables
1074 # Make it easy to refer to self.changelog
1074 # Make it easy to refer to self.changelog
1075 cl = self.changelog
1075 cl = self.changelog
1076 # msng is short for missing - compute the list of changesets in this
1076 # msng is short for missing - compute the list of changesets in this
1077 # changegroup.
1077 # changegroup.
1078 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1078 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1079 # Some bases may turn out to be superfluous, and some heads may be
1079 # Some bases may turn out to be superfluous, and some heads may be
1080 # too. nodesbetween will return the minimal set of bases and heads
1080 # too. nodesbetween will return the minimal set of bases and heads
1081 # necessary to re-create the changegroup.
1081 # necessary to re-create the changegroup.
1082
1082
1083 # Known heads are the list of heads that it is assumed the recipient
1083 # Known heads are the list of heads that it is assumed the recipient
1084 # of this changegroup will know about.
1084 # of this changegroup will know about.
1085 knownheads = {}
1085 knownheads = {}
1086 # We assume that all parents of bases are known heads.
1086 # We assume that all parents of bases are known heads.
1087 for n in bases:
1087 for n in bases:
1088 for p in cl.parents(n):
1088 for p in cl.parents(n):
1089 if p != nullid:
1089 if p != nullid:
1090 knownheads[p] = 1
1090 knownheads[p] = 1
1091 knownheads = knownheads.keys()
1091 knownheads = knownheads.keys()
1092 if knownheads:
1092 if knownheads:
1093 # Now that we know what heads are known, we can compute which
1093 # Now that we know what heads are known, we can compute which
1094 # changesets are known. The recipient must know about all
1094 # changesets are known. The recipient must know about all
1095 # changesets required to reach the known heads from the null
1095 # changesets required to reach the known heads from the null
1096 # changeset.
1096 # changeset.
1097 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1097 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1098 junk = None
1098 junk = None
1099 # Transform the list into an ersatz set.
1099 # Transform the list into an ersatz set.
1100 has_cl_set = dict.fromkeys(has_cl_set)
1100 has_cl_set = dict.fromkeys(has_cl_set)
1101 else:
1101 else:
1102 # If there were no known heads, the recipient cannot be assumed to
1102 # If there were no known heads, the recipient cannot be assumed to
1103 # know about any changesets.
1103 # know about any changesets.
1104 has_cl_set = {}
1104 has_cl_set = {}
1105
1105
1106 # Make it easy to refer to self.manifest
1106 # Make it easy to refer to self.manifest
1107 mnfst = self.manifest
1107 mnfst = self.manifest
1108 # We don't know which manifests are missing yet
1108 # We don't know which manifests are missing yet
1109 msng_mnfst_set = {}
1109 msng_mnfst_set = {}
1110 # Nor do we know which filenodes are missing.
1110 # Nor do we know which filenodes are missing.
1111 msng_filenode_set = {}
1111 msng_filenode_set = {}
1112
1112
1113 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1113 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1114 junk = None
1114 junk = None
1115
1115
1116 # A changeset always belongs to itself, so the changenode lookup
1116 # A changeset always belongs to itself, so the changenode lookup
1117 # function for a changenode is identity.
1117 # function for a changenode is identity.
1118 def identity(x):
1118 def identity(x):
1119 return x
1119 return x
1120
1120
1121 # A function generating function. Sets up an environment for the
1121 # A function generating function. Sets up an environment for the
1122 # inner function.
1122 # inner function.
1123 def cmp_by_rev_func(revlog):
1123 def cmp_by_rev_func(revlog):
1124 # Compare two nodes by their revision number in the environment's
1124 # Compare two nodes by their revision number in the environment's
1125 # revision history. Since the revision number both represents the
1125 # revision history. Since the revision number both represents the
1126 # most efficient order to read the nodes in, and represents a
1126 # most efficient order to read the nodes in, and represents a
1127 # topological sorting of the nodes, this function is often useful.
1127 # topological sorting of the nodes, this function is often useful.
1128 def cmp_by_rev(a, b):
1128 def cmp_by_rev(a, b):
1129 return cmp(revlog.rev(a), revlog.rev(b))
1129 return cmp(revlog.rev(a), revlog.rev(b))
1130 return cmp_by_rev
1130 return cmp_by_rev
1131
1131
1132 # If we determine that a particular file or manifest node must be a
1132 # If we determine that a particular file or manifest node must be a
1133 # node that the recipient of the changegroup will already have, we can
1133 # node that the recipient of the changegroup will already have, we can
1134 # also assume the recipient will have all the parents. This function
1134 # also assume the recipient will have all the parents. This function
1135 # prunes them from the set of missing nodes.
1135 # prunes them from the set of missing nodes.
1136 def prune_parents(revlog, hasset, msngset):
1136 def prune_parents(revlog, hasset, msngset):
1137 haslst = hasset.keys()
1137 haslst = hasset.keys()
1138 haslst.sort(cmp_by_rev_func(revlog))
1138 haslst.sort(cmp_by_rev_func(revlog))
1139 for node in haslst:
1139 for node in haslst:
1140 parentlst = [p for p in revlog.parents(node) if p != nullid]
1140 parentlst = [p for p in revlog.parents(node) if p != nullid]
1141 while parentlst:
1141 while parentlst:
1142 n = parentlst.pop()
1142 n = parentlst.pop()
1143 if n not in hasset:
1143 if n not in hasset:
1144 hasset[n] = 1
1144 hasset[n] = 1
1145 p = [p for p in revlog.parents(n) if p != nullid]
1145 p = [p for p in revlog.parents(n) if p != nullid]
1146 parentlst.extend(p)
1146 parentlst.extend(p)
1147 for n in hasset:
1147 for n in hasset:
1148 msngset.pop(n, None)
1148 msngset.pop(n, None)
1149
1149
1150 # This is a function generating function used to set up an environment
1150 # This is a function generating function used to set up an environment
1151 # for the inner function to execute in.
1151 # for the inner function to execute in.
1152 def manifest_and_file_collector(changedfileset):
1152 def manifest_and_file_collector(changedfileset):
1153 # This is an information gathering function that gathers
1153 # This is an information gathering function that gathers
1154 # information from each changeset node that goes out as part of
1154 # information from each changeset node that goes out as part of
1155 # the changegroup. The information gathered is a list of which
1155 # the changegroup. The information gathered is a list of which
1156 # manifest nodes are potentially required (the recipient may
1156 # manifest nodes are potentially required (the recipient may
1157 # already have them) and total list of all files which were
1157 # already have them) and total list of all files which were
1158 # changed in any changeset in the changegroup.
1158 # changed in any changeset in the changegroup.
1159 #
1159 #
1160 # We also remember the first changenode we saw any manifest
1160 # We also remember the first changenode we saw any manifest
1161 # referenced by so we can later determine which changenode 'owns'
1161 # referenced by so we can later determine which changenode 'owns'
1162 # the manifest.
1162 # the manifest.
1163 def collect_manifests_and_files(clnode):
1163 def collect_manifests_and_files(clnode):
1164 c = cl.read(clnode)
1164 c = cl.read(clnode)
1165 for f in c[3]:
1165 for f in c[3]:
1166 # This is to make sure we only have one instance of each
1166 # This is to make sure we only have one instance of each
1167 # filename string for each filename.
1167 # filename string for each filename.
1168 changedfileset.setdefault(f, f)
1168 changedfileset.setdefault(f, f)
1169 msng_mnfst_set.setdefault(c[0], clnode)
1169 msng_mnfst_set.setdefault(c[0], clnode)
1170 return collect_manifests_and_files
1170 return collect_manifests_and_files
1171
1171
1172 # Figure out which manifest nodes (of the ones we think might be part
1172 # Figure out which manifest nodes (of the ones we think might be part
1173 # of the changegroup) the recipient must know about and remove them
1173 # of the changegroup) the recipient must know about and remove them
1174 # from the changegroup.
1174 # from the changegroup.
1175 def prune_manifests():
1175 def prune_manifests():
1176 has_mnfst_set = {}
1176 has_mnfst_set = {}
1177 for n in msng_mnfst_set:
1177 for n in msng_mnfst_set:
1178 # If a 'missing' manifest thinks it belongs to a changenode
1178 # If a 'missing' manifest thinks it belongs to a changenode
1179 # the recipient is assumed to have, obviously the recipient
1179 # the recipient is assumed to have, obviously the recipient
1180 # must have that manifest.
1180 # must have that manifest.
1181 linknode = cl.node(mnfst.linkrev(n))
1181 linknode = cl.node(mnfst.linkrev(n))
1182 if linknode in has_cl_set:
1182 if linknode in has_cl_set:
1183 has_mnfst_set[n] = 1
1183 has_mnfst_set[n] = 1
1184 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1184 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1185
1185
1186 # Use the information collected in collect_manifests_and_files to say
1186 # Use the information collected in collect_manifests_and_files to say
1187 # which changenode any manifestnode belongs to.
1187 # which changenode any manifestnode belongs to.
1188 def lookup_manifest_link(mnfstnode):
1188 def lookup_manifest_link(mnfstnode):
1189 return msng_mnfst_set[mnfstnode]
1189 return msng_mnfst_set[mnfstnode]
1190
1190
1191 # A function generating function that sets up the initial environment
1191 # A function generating function that sets up the initial environment
1192 # the inner function.
1192 # the inner function.
1193 def filenode_collector(changedfiles):
1193 def filenode_collector(changedfiles):
1194 next_rev = [0]
1194 next_rev = [0]
1195 # This gathers information from each manifestnode included in the
1195 # This gathers information from each manifestnode included in the
1196 # changegroup about which filenodes the manifest node references
1196 # changegroup about which filenodes the manifest node references
1197 # so we can include those in the changegroup too.
1197 # so we can include those in the changegroup too.
1198 #
1198 #
1199 # It also remembers which changenode each filenode belongs to. It
1199 # It also remembers which changenode each filenode belongs to. It
1200 # does this by assuming the a filenode belongs to the changenode
1200 # does this by assuming the a filenode belongs to the changenode
1201 # the first manifest that references it belongs to.
1201 # the first manifest that references it belongs to.
1202 def collect_msng_filenodes(mnfstnode):
1202 def collect_msng_filenodes(mnfstnode):
1203 r = mnfst.rev(mnfstnode)
1203 r = mnfst.rev(mnfstnode)
1204 if r == next_rev[0]:
1204 if r == next_rev[0]:
1205 # If the last rev we looked at was the one just previous,
1205 # If the last rev we looked at was the one just previous,
1206 # we only need to see a diff.
1206 # we only need to see a diff.
1207 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1207 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1208 # For each line in the delta
1208 # For each line in the delta
1209 for dline in delta.splitlines():
1209 for dline in delta.splitlines():
1210 # get the filename and filenode for that line
1210 # get the filename and filenode for that line
1211 f, fnode = dline.split('\0')
1211 f, fnode = dline.split('\0')
1212 fnode = bin(fnode[:40])
1212 fnode = bin(fnode[:40])
1213 f = changedfiles.get(f, None)
1213 f = changedfiles.get(f, None)
1214 # And if the file is in the list of files we care
1214 # And if the file is in the list of files we care
1215 # about.
1215 # about.
1216 if f is not None:
1216 if f is not None:
1217 # Get the changenode this manifest belongs to
1217 # Get the changenode this manifest belongs to
1218 clnode = msng_mnfst_set[mnfstnode]
1218 clnode = msng_mnfst_set[mnfstnode]
1219 # Create the set of filenodes for the file if
1219 # Create the set of filenodes for the file if
1220 # there isn't one already.
1220 # there isn't one already.
1221 ndset = msng_filenode_set.setdefault(f, {})
1221 ndset = msng_filenode_set.setdefault(f, {})
1222 # And set the filenode's changelog node to the
1222 # And set the filenode's changelog node to the
1223 # manifest's if it hasn't been set already.
1223 # manifest's if it hasn't been set already.
1224 ndset.setdefault(fnode, clnode)
1224 ndset.setdefault(fnode, clnode)
1225 else:
1225 else:
1226 # Otherwise we need a full manifest.
1226 # Otherwise we need a full manifest.
1227 m = mnfst.read(mnfstnode)
1227 m = mnfst.read(mnfstnode)
1228 # For every file in we care about.
1228 # For every file in we care about.
1229 for f in changedfiles:
1229 for f in changedfiles:
1230 fnode = m.get(f, None)
1230 fnode = m.get(f, None)
1231 # If it's in the manifest
1231 # If it's in the manifest
1232 if fnode is not None:
1232 if fnode is not None:
1233 # See comments above.
1233 # See comments above.
1234 clnode = msng_mnfst_set[mnfstnode]
1234 clnode = msng_mnfst_set[mnfstnode]
1235 ndset = msng_filenode_set.setdefault(f, {})
1235 ndset = msng_filenode_set.setdefault(f, {})
1236 ndset.setdefault(fnode, clnode)
1236 ndset.setdefault(fnode, clnode)
1237 # Remember the revision we hope to see next.
1237 # Remember the revision we hope to see next.
1238 next_rev[0] = r + 1
1238 next_rev[0] = r + 1
1239 return collect_msng_filenodes
1239 return collect_msng_filenodes
1240
1240
1241 # We have a list of filenodes we think we need for a file, lets remove
1241 # We have a list of filenodes we think we need for a file, lets remove
1242 # all those we now the recipient must have.
1242 # all those we now the recipient must have.
1243 def prune_filenodes(f, filerevlog):
1243 def prune_filenodes(f, filerevlog):
1244 msngset = msng_filenode_set[f]
1244 msngset = msng_filenode_set[f]
1245 hasset = {}
1245 hasset = {}
1246 # If a 'missing' filenode thinks it belongs to a changenode we
1246 # If a 'missing' filenode thinks it belongs to a changenode we
1247 # assume the recipient must have, then the recipient must have
1247 # assume the recipient must have, then the recipient must have
1248 # that filenode.
1248 # that filenode.
1249 for n in msngset:
1249 for n in msngset:
1250 clnode = cl.node(filerevlog.linkrev(n))
1250 clnode = cl.node(filerevlog.linkrev(n))
1251 if clnode in has_cl_set:
1251 if clnode in has_cl_set:
1252 hasset[n] = 1
1252 hasset[n] = 1
1253 prune_parents(filerevlog, hasset, msngset)
1253 prune_parents(filerevlog, hasset, msngset)
1254
1254
1255 # A function generator function that sets up the a context for the
1255 # A function generator function that sets up the a context for the
1256 # inner function.
1256 # inner function.
1257 def lookup_filenode_link_func(fname):
1257 def lookup_filenode_link_func(fname):
1258 msngset = msng_filenode_set[fname]
1258 msngset = msng_filenode_set[fname]
1259 # Lookup the changenode the filenode belongs to.
1259 # Lookup the changenode the filenode belongs to.
1260 def lookup_filenode_link(fnode):
1260 def lookup_filenode_link(fnode):
1261 return msngset[fnode]
1261 return msngset[fnode]
1262 return lookup_filenode_link
1262 return lookup_filenode_link
1263
1263
1264 # Now that we have all theses utility functions to help out and
1264 # Now that we have all theses utility functions to help out and
1265 # logically divide up the task, generate the group.
1265 # logically divide up the task, generate the group.
1266 def gengroup():
1266 def gengroup():
1267 # The set of changed files starts empty.
1267 # The set of changed files starts empty.
1268 changedfiles = {}
1268 changedfiles = {}
1269 # Create a changenode group generator that will call our functions
1269 # Create a changenode group generator that will call our functions
1270 # back to lookup the owning changenode and collect information.
1270 # back to lookup the owning changenode and collect information.
1271 group = cl.group(msng_cl_lst, identity,
1271 group = cl.group(msng_cl_lst, identity,
1272 manifest_and_file_collector(changedfiles))
1272 manifest_and_file_collector(changedfiles))
1273 for chnk in group:
1273 for chnk in group:
1274 yield chnk
1274 yield chnk
1275
1275
1276 # The list of manifests has been collected by the generator
1276 # The list of manifests has been collected by the generator
1277 # calling our functions back.
1277 # calling our functions back.
1278 prune_manifests()
1278 prune_manifests()
1279 msng_mnfst_lst = msng_mnfst_set.keys()
1279 msng_mnfst_lst = msng_mnfst_set.keys()
1280 # Sort the manifestnodes by revision number.
1280 # Sort the manifestnodes by revision number.
1281 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1281 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1282 # Create a generator for the manifestnodes that calls our lookup
1282 # Create a generator for the manifestnodes that calls our lookup
1283 # and data collection functions back.
1283 # and data collection functions back.
1284 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1284 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1285 filenode_collector(changedfiles))
1285 filenode_collector(changedfiles))
1286 for chnk in group:
1286 for chnk in group:
1287 yield chnk
1287 yield chnk
1288
1288
1289 # These are no longer needed, dereference and toss the memory for
1289 # These are no longer needed, dereference and toss the memory for
1290 # them.
1290 # them.
1291 msng_mnfst_lst = None
1291 msng_mnfst_lst = None
1292 msng_mnfst_set.clear()
1292 msng_mnfst_set.clear()
1293
1293
1294 changedfiles = changedfiles.keys()
1294 changedfiles = changedfiles.keys()
1295 changedfiles.sort()
1295 changedfiles.sort()
1296 # Go through all our files in order sorted by name.
1296 # Go through all our files in order sorted by name.
1297 for fname in changedfiles:
1297 for fname in changedfiles:
1298 filerevlog = self.file(fname)
1298 filerevlog = self.file(fname)
1299 # Toss out the filenodes that the recipient isn't really
1299 # Toss out the filenodes that the recipient isn't really
1300 # missing.
1300 # missing.
1301 if msng_filenode_set.has_key(fname):
1301 if msng_filenode_set.has_key(fname):
1302 prune_filenodes(fname, filerevlog)
1302 prune_filenodes(fname, filerevlog)
1303 msng_filenode_lst = msng_filenode_set[fname].keys()
1303 msng_filenode_lst = msng_filenode_set[fname].keys()
1304 else:
1304 else:
1305 msng_filenode_lst = []
1305 msng_filenode_lst = []
1306 # If any filenodes are left, generate the group for them,
1306 # If any filenodes are left, generate the group for them,
1307 # otherwise don't bother.
1307 # otherwise don't bother.
1308 if len(msng_filenode_lst) > 0:
1308 if len(msng_filenode_lst) > 0:
1309 yield changegroup.genchunk(fname)
1309 yield changegroup.genchunk(fname)
1310 # Sort the filenodes by their revision #
1310 # Sort the filenodes by their revision #
1311 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1311 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1312 # Create a group generator and only pass in a changenode
1312 # Create a group generator and only pass in a changenode
1313 # lookup function as we need to collect no information
1313 # lookup function as we need to collect no information
1314 # from filenodes.
1314 # from filenodes.
1315 group = filerevlog.group(msng_filenode_lst,
1315 group = filerevlog.group(msng_filenode_lst,
1316 lookup_filenode_link_func(fname))
1316 lookup_filenode_link_func(fname))
1317 for chnk in group:
1317 for chnk in group:
1318 yield chnk
1318 yield chnk
1319 if msng_filenode_set.has_key(fname):
1319 if msng_filenode_set.has_key(fname):
1320 # Don't need this anymore, toss it to free memory.
1320 # Don't need this anymore, toss it to free memory.
1321 del msng_filenode_set[fname]
1321 del msng_filenode_set[fname]
1322 # Signal that no more groups are left.
1322 # Signal that no more groups are left.
1323 yield changegroup.closechunk()
1323 yield changegroup.closechunk()
1324
1324
1325 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1325 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1326
1326
1327 return util.chunkbuffer(gengroup())
1327 return util.chunkbuffer(gengroup())
1328
1328
1329 def changegroup(self, basenodes, source):
1329 def changegroup(self, basenodes, source):
1330 """Generate a changegroup of all nodes that we have that a recipient
1330 """Generate a changegroup of all nodes that we have that a recipient
1331 doesn't.
1331 doesn't.
1332
1332
1333 This is much easier than the previous function as we can assume that
1333 This is much easier than the previous function as we can assume that
1334 the recipient has any changenode we aren't sending them."""
1334 the recipient has any changenode we aren't sending them."""
1335
1335
1336 self.hook('preoutgoing', throw=True, source=source)
1336 self.hook('preoutgoing', throw=True, source=source)
1337
1337
1338 cl = self.changelog
1338 cl = self.changelog
1339 nodes = cl.nodesbetween(basenodes, None)[0]
1339 nodes = cl.nodesbetween(basenodes, None)[0]
1340 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1340 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1341
1341
1342 def identity(x):
1342 def identity(x):
1343 return x
1343 return x
1344
1344
1345 def gennodelst(revlog):
1345 def gennodelst(revlog):
1346 for r in xrange(0, revlog.count()):
1346 for r in xrange(0, revlog.count()):
1347 n = revlog.node(r)
1347 n = revlog.node(r)
1348 if revlog.linkrev(n) in revset:
1348 if revlog.linkrev(n) in revset:
1349 yield n
1349 yield n
1350
1350
1351 def changed_file_collector(changedfileset):
1351 def changed_file_collector(changedfileset):
1352 def collect_changed_files(clnode):
1352 def collect_changed_files(clnode):
1353 c = cl.read(clnode)
1353 c = cl.read(clnode)
1354 for fname in c[3]:
1354 for fname in c[3]:
1355 changedfileset[fname] = 1
1355 changedfileset[fname] = 1
1356 return collect_changed_files
1356 return collect_changed_files
1357
1357
1358 def lookuprevlink_func(revlog):
1358 def lookuprevlink_func(revlog):
1359 def lookuprevlink(n):
1359 def lookuprevlink(n):
1360 return cl.node(revlog.linkrev(n))
1360 return cl.node(revlog.linkrev(n))
1361 return lookuprevlink
1361 return lookuprevlink
1362
1362
1363 def gengroup():
1363 def gengroup():
1364 # construct a list of all changed files
1364 # construct a list of all changed files
1365 changedfiles = {}
1365 changedfiles = {}
1366
1366
1367 for chnk in cl.group(nodes, identity,
1367 for chnk in cl.group(nodes, identity,
1368 changed_file_collector(changedfiles)):
1368 changed_file_collector(changedfiles)):
1369 yield chnk
1369 yield chnk
1370 changedfiles = changedfiles.keys()
1370 changedfiles = changedfiles.keys()
1371 changedfiles.sort()
1371 changedfiles.sort()
1372
1372
1373 mnfst = self.manifest
1373 mnfst = self.manifest
1374 nodeiter = gennodelst(mnfst)
1374 nodeiter = gennodelst(mnfst)
1375 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1375 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1376 yield chnk
1376 yield chnk
1377
1377
1378 for fname in changedfiles:
1378 for fname in changedfiles:
1379 filerevlog = self.file(fname)
1379 filerevlog = self.file(fname)
1380 nodeiter = gennodelst(filerevlog)
1380 nodeiter = gennodelst(filerevlog)
1381 nodeiter = list(nodeiter)
1381 nodeiter = list(nodeiter)
1382 if nodeiter:
1382 if nodeiter:
1383 yield changegroup.genchunk(fname)
1383 yield changegroup.genchunk(fname)
1384 lookup = lookuprevlink_func(filerevlog)
1384 lookup = lookuprevlink_func(filerevlog)
1385 for chnk in filerevlog.group(nodeiter, lookup):
1385 for chnk in filerevlog.group(nodeiter, lookup):
1386 yield chnk
1386 yield chnk
1387
1387
1388 yield changegroup.closechunk()
1388 yield changegroup.closechunk()
1389 self.hook('outgoing', node=hex(nodes[0]), source=source)
1389
1390 if nodes:
1391 self.hook('outgoing', node=hex(nodes[0]), source=source)
1390
1392
1391 return util.chunkbuffer(gengroup())
1393 return util.chunkbuffer(gengroup())
1392
1394
    def addchangegroup(self, source):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a stream of changegroup chunks (as produced by
        changegroup/changegroupsubset): a changelog group, a manifest
        group, then per-file groups terminated by an empty chunk.

        Fires 'prechangegroup' (may veto) before reading, then
        'pretxnchangegroup' (may veto, rolls back the transaction) before
        commit, and 'changegroup'/'incoming' after commit when changesets
        were actually added."""

        # Progress callback for changelog.addgroup: log each changeset and
        # return the link revision to assign (the next changelog rev).
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Link-revision lookup for manifest/filelog groups: filenodes and
        # manifest nodes link to an already-added changelog node.
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = appendfile.appendchangelog(self.opener, self.changelog.version)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()  # old tip, to compute how many changesets arrived
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            # empty group: treat new tip rev as old tip rev so the
            # changesets count below comes out zero
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener, self.manifest.version)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file section starts with a chunk holding the filename;
            # an empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see
        # consistent view of repo
        mf.writedata()
        cl.writedata()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # NOTE(review): if the group added no changesets, cor+1 is past the
        # tip here, so this node() lookup presumably fails -- callers seem
        # to only send non-empty groups; confirm before relying on it.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            # first added changeset identifies the group as a whole
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # then one 'incoming' hook per added changeset, in rev order
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

        return newheads - oldheads + 1
1483
1485
1484 def update(self, node, allow=False, force=False, choose=None,
1486 def update(self, node, allow=False, force=False, choose=None,
1485 moddirstate=True, forcemerge=False, wlock=None):
1487 moddirstate=True, forcemerge=False, wlock=None):
1486 pl = self.dirstate.parents()
1488 pl = self.dirstate.parents()
1487 if not force and pl[1] != nullid:
1489 if not force and pl[1] != nullid:
1488 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1490 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1489 return 1
1491 return 1
1490
1492
1491 err = False
1493 err = False
1492
1494
1493 p1, p2 = pl[0], node
1495 p1, p2 = pl[0], node
1494 pa = self.changelog.ancestor(p1, p2)
1496 pa = self.changelog.ancestor(p1, p2)
1495 m1n = self.changelog.read(p1)[0]
1497 m1n = self.changelog.read(p1)[0]
1496 m2n = self.changelog.read(p2)[0]
1498 m2n = self.changelog.read(p2)[0]
1497 man = self.manifest.ancestor(m1n, m2n)
1499 man = self.manifest.ancestor(m1n, m2n)
1498 m1 = self.manifest.read(m1n)
1500 m1 = self.manifest.read(m1n)
1499 mf1 = self.manifest.readflags(m1n)
1501 mf1 = self.manifest.readflags(m1n)
1500 m2 = self.manifest.read(m2n).copy()
1502 m2 = self.manifest.read(m2n).copy()
1501 mf2 = self.manifest.readflags(m2n)
1503 mf2 = self.manifest.readflags(m2n)
1502 ma = self.manifest.read(man)
1504 ma = self.manifest.read(man)
1503 mfa = self.manifest.readflags(man)
1505 mfa = self.manifest.readflags(man)
1504
1506
1505 modified, added, removed, deleted, unknown = self.changes()
1507 modified, added, removed, deleted, unknown = self.changes()
1506
1508
1507 # is this a jump, or a merge? i.e. is there a linear path
1509 # is this a jump, or a merge? i.e. is there a linear path
1508 # from p1 to p2?
1510 # from p1 to p2?
1509 linear_path = (pa == p1 or pa == p2)
1511 linear_path = (pa == p1 or pa == p2)
1510
1512
1511 if allow and linear_path:
1513 if allow and linear_path:
1512 raise util.Abort(_("there is nothing to merge, "
1514 raise util.Abort(_("there is nothing to merge, "
1513 "just use 'hg update'"))
1515 "just use 'hg update'"))
1514 if allow and not forcemerge:
1516 if allow and not forcemerge:
1515 if modified or added or removed:
1517 if modified or added or removed:
1516 raise util.Abort(_("outstanding uncommitted changes"))
1518 raise util.Abort(_("outstanding uncommitted changes"))
1517 if not forcemerge and not force:
1519 if not forcemerge and not force:
1518 for f in unknown:
1520 for f in unknown:
1519 if f in m2:
1521 if f in m2:
1520 t1 = self.wread(f)
1522 t1 = self.wread(f)
1521 t2 = self.file(f).read(m2[f])
1523 t2 = self.file(f).read(m2[f])
1522 if cmp(t1, t2) != 0:
1524 if cmp(t1, t2) != 0:
1523 raise util.Abort(_("'%s' already exists in the working"
1525 raise util.Abort(_("'%s' already exists in the working"
1524 " dir and differs from remote") % f)
1526 " dir and differs from remote") % f)
1525
1527
1526 # resolve the manifest to determine which files
1528 # resolve the manifest to determine which files
1527 # we care about merging
1529 # we care about merging
1528 self.ui.note(_("resolving manifests\n"))
1530 self.ui.note(_("resolving manifests\n"))
1529 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1531 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1530 (force, allow, moddirstate, linear_path))
1532 (force, allow, moddirstate, linear_path))
1531 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1533 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1532 (short(man), short(m1n), short(m2n)))
1534 (short(man), short(m1n), short(m2n)))
1533
1535
1534 merge = {}
1536 merge = {}
1535 get = {}
1537 get = {}
1536 remove = []
1538 remove = []
1537
1539
1538 # construct a working dir manifest
1540 # construct a working dir manifest
1539 mw = m1.copy()
1541 mw = m1.copy()
1540 mfw = mf1.copy()
1542 mfw = mf1.copy()
1541 umap = dict.fromkeys(unknown)
1543 umap = dict.fromkeys(unknown)
1542
1544
1543 for f in added + modified + unknown:
1545 for f in added + modified + unknown:
1544 mw[f] = ""
1546 mw[f] = ""
1545 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1547 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1546
1548
1547 if moddirstate and not wlock:
1549 if moddirstate and not wlock:
1548 wlock = self.wlock()
1550 wlock = self.wlock()
1549
1551
1550 for f in deleted + removed:
1552 for f in deleted + removed:
1551 if f in mw:
1553 if f in mw:
1552 del mw[f]
1554 del mw[f]
1553
1555
1554 # If we're jumping between revisions (as opposed to merging),
1556 # If we're jumping between revisions (as opposed to merging),
1555 # and if neither the working directory nor the target rev has
1557 # and if neither the working directory nor the target rev has
1556 # the file, then we need to remove it from the dirstate, to
1558 # the file, then we need to remove it from the dirstate, to
1557 # prevent the dirstate from listing the file when it is no
1559 # prevent the dirstate from listing the file when it is no
1558 # longer in the manifest.
1560 # longer in the manifest.
1559 if moddirstate and linear_path and f not in m2:
1561 if moddirstate and linear_path and f not in m2:
1560 self.dirstate.forget((f,))
1562 self.dirstate.forget((f,))
1561
1563
1562 # Compare manifests
1564 # Compare manifests
1563 for f, n in mw.iteritems():
1565 for f, n in mw.iteritems():
1564 if choose and not choose(f):
1566 if choose and not choose(f):
1565 continue
1567 continue
1566 if f in m2:
1568 if f in m2:
1567 s = 0
1569 s = 0
1568
1570
1569 # is the wfile new since m1, and match m2?
1571 # is the wfile new since m1, and match m2?
1570 if f not in m1:
1572 if f not in m1:
1571 t1 = self.wread(f)
1573 t1 = self.wread(f)
1572 t2 = self.file(f).read(m2[f])
1574 t2 = self.file(f).read(m2[f])
1573 if cmp(t1, t2) == 0:
1575 if cmp(t1, t2) == 0:
1574 n = m2[f]
1576 n = m2[f]
1575 del t1, t2
1577 del t1, t2
1576
1578
1577 # are files different?
1579 # are files different?
1578 if n != m2[f]:
1580 if n != m2[f]:
1579 a = ma.get(f, nullid)
1581 a = ma.get(f, nullid)
1580 # are both different from the ancestor?
1582 # are both different from the ancestor?
1581 if n != a and m2[f] != a:
1583 if n != a and m2[f] != a:
1582 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1584 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1583 # merge executable bits
1585 # merge executable bits
1584 # "if we changed or they changed, change in merge"
1586 # "if we changed or they changed, change in merge"
1585 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1587 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1586 mode = ((a^b) | (a^c)) ^ a
1588 mode = ((a^b) | (a^c)) ^ a
1587 merge[f] = (m1.get(f, nullid), m2[f], mode)
1589 merge[f] = (m1.get(f, nullid), m2[f], mode)
1588 s = 1
1590 s = 1
1589 # are we clobbering?
1591 # are we clobbering?
1590 # is remote's version newer?
1592 # is remote's version newer?
1591 # or are we going back in time?
1593 # or are we going back in time?
1592 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1594 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1593 self.ui.debug(_(" remote %s is newer, get\n") % f)
1595 self.ui.debug(_(" remote %s is newer, get\n") % f)
1594 get[f] = m2[f]
1596 get[f] = m2[f]
1595 s = 1
1597 s = 1
1596 elif f in umap or f in added:
1598 elif f in umap or f in added:
1597 # this unknown file is the same as the checkout
1599 # this unknown file is the same as the checkout
1598 # we need to reset the dirstate if the file was added
1600 # we need to reset the dirstate if the file was added
1599 get[f] = m2[f]
1601 get[f] = m2[f]
1600
1602
1601 if not s and mfw[f] != mf2[f]:
1603 if not s and mfw[f] != mf2[f]:
1602 if force:
1604 if force:
1603 self.ui.debug(_(" updating permissions for %s\n") % f)
1605 self.ui.debug(_(" updating permissions for %s\n") % f)
1604 util.set_exec(self.wjoin(f), mf2[f])
1606 util.set_exec(self.wjoin(f), mf2[f])
1605 else:
1607 else:
1606 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1608 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1607 mode = ((a^b) | (a^c)) ^ a
1609 mode = ((a^b) | (a^c)) ^ a
1608 if mode != b:
1610 if mode != b:
1609 self.ui.debug(_(" updating permissions for %s\n")
1611 self.ui.debug(_(" updating permissions for %s\n")
1610 % f)
1612 % f)
1611 util.set_exec(self.wjoin(f), mode)
1613 util.set_exec(self.wjoin(f), mode)
1612 del m2[f]
1614 del m2[f]
1613 elif f in ma:
1615 elif f in ma:
1614 if n != ma[f]:
1616 if n != ma[f]:
1615 r = _("d")
1617 r = _("d")
1616 if not force and (linear_path or allow):
1618 if not force and (linear_path or allow):
1617 r = self.ui.prompt(
1619 r = self.ui.prompt(
1618 (_(" local changed %s which remote deleted\n") % f) +
1620 (_(" local changed %s which remote deleted\n") % f) +
1619 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1621 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1620 if r == _("d"):
1622 if r == _("d"):
1621 remove.append(f)
1623 remove.append(f)
1622 else:
1624 else:
1623 self.ui.debug(_("other deleted %s\n") % f)
1625 self.ui.debug(_("other deleted %s\n") % f)
1624 remove.append(f) # other deleted it
1626 remove.append(f) # other deleted it
1625 else:
1627 else:
1626 # file is created on branch or in working directory
1628 # file is created on branch or in working directory
1627 if force and f not in umap:
1629 if force and f not in umap:
1628 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1630 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1629 remove.append(f)
1631 remove.append(f)
1630 elif n == m1.get(f, nullid): # same as parent
1632 elif n == m1.get(f, nullid): # same as parent
1631 if p2 == pa: # going backwards?
1633 if p2 == pa: # going backwards?
1632 self.ui.debug(_("remote deleted %s\n") % f)
1634 self.ui.debug(_("remote deleted %s\n") % f)
1633 remove.append(f)
1635 remove.append(f)
1634 else:
1636 else:
1635 self.ui.debug(_("local modified %s, keeping\n") % f)
1637 self.ui.debug(_("local modified %s, keeping\n") % f)
1636 else:
1638 else:
1637 self.ui.debug(_("working dir created %s, keeping\n") % f)
1639 self.ui.debug(_("working dir created %s, keeping\n") % f)
1638
1640
1639 for f, n in m2.iteritems():
1641 for f, n in m2.iteritems():
1640 if choose and not choose(f):
1642 if choose and not choose(f):
1641 continue
1643 continue
1642 if f[0] == "/":
1644 if f[0] == "/":
1643 continue
1645 continue
1644 if f in ma and n != ma[f]:
1646 if f in ma and n != ma[f]:
1645 r = _("k")
1647 r = _("k")
1646 if not force and (linear_path or allow):
1648 if not force and (linear_path or allow):
1647 r = self.ui.prompt(
1649 r = self.ui.prompt(
1648 (_("remote changed %s which local deleted\n") % f) +
1650 (_("remote changed %s which local deleted\n") % f) +
1649 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1651 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1650 if r == _("k"):
1652 if r == _("k"):
1651 get[f] = n
1653 get[f] = n
1652 elif f not in ma:
1654 elif f not in ma:
1653 self.ui.debug(_("remote created %s\n") % f)
1655 self.ui.debug(_("remote created %s\n") % f)
1654 get[f] = n
1656 get[f] = n
1655 else:
1657 else:
1656 if force or p2 == pa: # going backwards?
1658 if force or p2 == pa: # going backwards?
1657 self.ui.debug(_("local deleted %s, recreating\n") % f)
1659 self.ui.debug(_("local deleted %s, recreating\n") % f)
1658 get[f] = n
1660 get[f] = n
1659 else:
1661 else:
1660 self.ui.debug(_("local deleted %s\n") % f)
1662 self.ui.debug(_("local deleted %s\n") % f)
1661
1663
1662 del mw, m1, m2, ma
1664 del mw, m1, m2, ma
1663
1665
1664 if force:
1666 if force:
1665 for f in merge:
1667 for f in merge:
1666 get[f] = merge[f][1]
1668 get[f] = merge[f][1]
1667 merge = {}
1669 merge = {}
1668
1670
1669 if linear_path or force:
1671 if linear_path or force:
1670 # we don't need to do any magic, just jump to the new rev
1672 # we don't need to do any magic, just jump to the new rev
1671 branch_merge = False
1673 branch_merge = False
1672 p1, p2 = p2, nullid
1674 p1, p2 = p2, nullid
1673 else:
1675 else:
1674 if not allow:
1676 if not allow:
1675 self.ui.status(_("this update spans a branch"
1677 self.ui.status(_("this update spans a branch"
1676 " affecting the following files:\n"))
1678 " affecting the following files:\n"))
1677 fl = merge.keys() + get.keys()
1679 fl = merge.keys() + get.keys()
1678 fl.sort()
1680 fl.sort()
1679 for f in fl:
1681 for f in fl:
1680 cf = ""
1682 cf = ""
1681 if f in merge:
1683 if f in merge:
1682 cf = _(" (resolve)")
1684 cf = _(" (resolve)")
1683 self.ui.status(" %s%s\n" % (f, cf))
1685 self.ui.status(" %s%s\n" % (f, cf))
1684 self.ui.warn(_("aborting update spanning branches!\n"))
1686 self.ui.warn(_("aborting update spanning branches!\n"))
1685 self.ui.status(_("(use 'hg merge' to merge across branches"
1687 self.ui.status(_("(use 'hg merge' to merge across branches"
1686 " or 'hg update -C' to lose changes)\n"))
1688 " or 'hg update -C' to lose changes)\n"))
1687 return 1
1689 return 1
1688 branch_merge = True
1690 branch_merge = True
1689
1691
1690 # get the files we don't need to change
1692 # get the files we don't need to change
1691 files = get.keys()
1693 files = get.keys()
1692 files.sort()
1694 files.sort()
1693 for f in files:
1695 for f in files:
1694 if f[0] == "/":
1696 if f[0] == "/":
1695 continue
1697 continue
1696 self.ui.note(_("getting %s\n") % f)
1698 self.ui.note(_("getting %s\n") % f)
1697 t = self.file(f).read(get[f])
1699 t = self.file(f).read(get[f])
1698 self.wwrite(f, t)
1700 self.wwrite(f, t)
1699 util.set_exec(self.wjoin(f), mf2[f])
1701 util.set_exec(self.wjoin(f), mf2[f])
1700 if moddirstate:
1702 if moddirstate:
1701 if branch_merge:
1703 if branch_merge:
1702 self.dirstate.update([f], 'n', st_mtime=-1)
1704 self.dirstate.update([f], 'n', st_mtime=-1)
1703 else:
1705 else:
1704 self.dirstate.update([f], 'n')
1706 self.dirstate.update([f], 'n')
1705
1707
1706 # merge the tricky bits
1708 # merge the tricky bits
1707 failedmerge = []
1709 failedmerge = []
1708 files = merge.keys()
1710 files = merge.keys()
1709 files.sort()
1711 files.sort()
1710 xp1 = hex(p1)
1712 xp1 = hex(p1)
1711 xp2 = hex(p2)
1713 xp2 = hex(p2)
1712 for f in files:
1714 for f in files:
1713 self.ui.status(_("merging %s\n") % f)
1715 self.ui.status(_("merging %s\n") % f)
1714 my, other, flag = merge[f]
1716 my, other, flag = merge[f]
1715 ret = self.merge3(f, my, other, xp1, xp2)
1717 ret = self.merge3(f, my, other, xp1, xp2)
1716 if ret:
1718 if ret:
1717 err = True
1719 err = True
1718 failedmerge.append(f)
1720 failedmerge.append(f)
1719 util.set_exec(self.wjoin(f), flag)
1721 util.set_exec(self.wjoin(f), flag)
1720 if moddirstate:
1722 if moddirstate:
1721 if branch_merge:
1723 if branch_merge:
1722 # We've done a branch merge, mark this file as merged
1724 # We've done a branch merge, mark this file as merged
1723 # so that we properly record the merger later
1725 # so that we properly record the merger later
1724 self.dirstate.update([f], 'm')
1726 self.dirstate.update([f], 'm')
1725 else:
1727 else:
1726 # We've update-merged a locally modified file, so
1728 # We've update-merged a locally modified file, so
1727 # we set the dirstate to emulate a normal checkout
1729 # we set the dirstate to emulate a normal checkout
1728 # of that file some time in the past. Thus our
1730 # of that file some time in the past. Thus our
1729 # merge will appear as a normal local file
1731 # merge will appear as a normal local file
1730 # modification.
1732 # modification.
1731 f_len = len(self.file(f).read(other))
1733 f_len = len(self.file(f).read(other))
1732 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1734 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1733
1735
1734 remove.sort()
1736 remove.sort()
1735 for f in remove:
1737 for f in remove:
1736 self.ui.note(_("removing %s\n") % f)
1738 self.ui.note(_("removing %s\n") % f)
1737 util.audit_path(f)
1739 util.audit_path(f)
1738 try:
1740 try:
1739 util.unlink(self.wjoin(f))
1741 util.unlink(self.wjoin(f))
1740 except OSError, inst:
1742 except OSError, inst:
1741 if inst.errno != errno.ENOENT:
1743 if inst.errno != errno.ENOENT:
1742 self.ui.warn(_("update failed to remove %s: %s!\n") %
1744 self.ui.warn(_("update failed to remove %s: %s!\n") %
1743 (f, inst.strerror))
1745 (f, inst.strerror))
1744 if moddirstate:
1746 if moddirstate:
1745 if branch_merge:
1747 if branch_merge:
1746 self.dirstate.update(remove, 'r')
1748 self.dirstate.update(remove, 'r')
1747 else:
1749 else:
1748 self.dirstate.forget(remove)
1750 self.dirstate.forget(remove)
1749
1751
1750 if moddirstate:
1752 if moddirstate:
1751 self.dirstate.setparents(p1, p2)
1753 self.dirstate.setparents(p1, p2)
1752
1754
1753 stat = ((len(get), _("updated")),
1755 stat = ((len(get), _("updated")),
1754 (len(merge) - len(failedmerge), _("merged")),
1756 (len(merge) - len(failedmerge), _("merged")),
1755 (len(remove), _("removed")),
1757 (len(remove), _("removed")),
1756 (len(failedmerge), _("unresolved")))
1758 (len(failedmerge), _("unresolved")))
1757 note = ", ".join([_("%d files %s") % s for s in stat])
1759 note = ", ".join([_("%d files %s") % s for s in stat])
1758 self.ui.note("%s\n" % note)
1760 self.ui.note("%s\n" % note)
1759 if moddirstate and branch_merge:
1761 if moddirstate and branch_merge:
1760 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1762 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1761
1763
1762 return err
1764 return err
1763
1765
1764 def merge3(self, fn, my, other, p1, p2):
1766 def merge3(self, fn, my, other, p1, p2):
1765 """perform a 3-way merge in the working directory"""
1767 """perform a 3-way merge in the working directory"""
1766
1768
1767 def temp(prefix, node):
1769 def temp(prefix, node):
1768 pre = "%s~%s." % (os.path.basename(fn), prefix)
1770 pre = "%s~%s." % (os.path.basename(fn), prefix)
1769 (fd, name) = tempfile.mkstemp("", pre)
1771 (fd, name) = tempfile.mkstemp("", pre)
1770 f = os.fdopen(fd, "wb")
1772 f = os.fdopen(fd, "wb")
1771 self.wwrite(fn, fl.read(node), f)
1773 self.wwrite(fn, fl.read(node), f)
1772 f.close()
1774 f.close()
1773 return name
1775 return name
1774
1776
1775 fl = self.file(fn)
1777 fl = self.file(fn)
1776 base = fl.ancestor(my, other)
1778 base = fl.ancestor(my, other)
1777 a = self.wjoin(fn)
1779 a = self.wjoin(fn)
1778 b = temp("base", base)
1780 b = temp("base", base)
1779 c = temp("other", other)
1781 c = temp("other", other)
1780
1782
1781 self.ui.note(_("resolving %s\n") % fn)
1783 self.ui.note(_("resolving %s\n") % fn)
1782 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1784 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1783 (fn, short(my), short(other), short(base)))
1785 (fn, short(my), short(other), short(base)))
1784
1786
1785 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1787 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1786 or "hgmerge")
1788 or "hgmerge")
1787 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1789 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1788 environ={'HG_FILE': fn,
1790 environ={'HG_FILE': fn,
1789 'HG_MY_NODE': p1,
1791 'HG_MY_NODE': p1,
1790 'HG_OTHER_NODE': p2,
1792 'HG_OTHER_NODE': p2,
1791 'HG_FILE_MY_NODE': hex(my),
1793 'HG_FILE_MY_NODE': hex(my),
1792 'HG_FILE_OTHER_NODE': hex(other),
1794 'HG_FILE_OTHER_NODE': hex(other),
1793 'HG_FILE_BASE_NODE': hex(base)})
1795 'HG_FILE_BASE_NODE': hex(base)})
1794 if r:
1796 if r:
1795 self.ui.warn(_("merging %s failed!\n") % fn)
1797 self.ui.warn(_("merging %s failed!\n") % fn)
1796
1798
1797 os.unlink(b)
1799 os.unlink(b)
1798 os.unlink(c)
1800 os.unlink(c)
1799 return r
1801 return r
1800
1802
1801 def verify(self):
1803 def verify(self):
1802 filelinkrevs = {}
1804 filelinkrevs = {}
1803 filenodes = {}
1805 filenodes = {}
1804 changesets = revisions = files = 0
1806 changesets = revisions = files = 0
1805 errors = [0]
1807 errors = [0]
1806 neededmanifests = {}
1808 neededmanifests = {}
1807
1809
1808 def err(msg):
1810 def err(msg):
1809 self.ui.warn(msg + "\n")
1811 self.ui.warn(msg + "\n")
1810 errors[0] += 1
1812 errors[0] += 1
1811
1813
1812 def checksize(obj, name):
1814 def checksize(obj, name):
1813 d = obj.checksize()
1815 d = obj.checksize()
1814 if d[0]:
1816 if d[0]:
1815 err(_("%s data length off by %d bytes") % (name, d[0]))
1817 err(_("%s data length off by %d bytes") % (name, d[0]))
1816 if d[1]:
1818 if d[1]:
1817 err(_("%s index contains %d extra bytes") % (name, d[1]))
1819 err(_("%s index contains %d extra bytes") % (name, d[1]))
1818
1820
1819 seen = {}
1821 seen = {}
1820 self.ui.status(_("checking changesets\n"))
1822 self.ui.status(_("checking changesets\n"))
1821 checksize(self.changelog, "changelog")
1823 checksize(self.changelog, "changelog")
1822
1824
1823 for i in range(self.changelog.count()):
1825 for i in range(self.changelog.count()):
1824 changesets += 1
1826 changesets += 1
1825 n = self.changelog.node(i)
1827 n = self.changelog.node(i)
1826 l = self.changelog.linkrev(n)
1828 l = self.changelog.linkrev(n)
1827 if l != i:
1829 if l != i:
1828 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1830 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1829 if n in seen:
1831 if n in seen:
1830 err(_("duplicate changeset at revision %d") % i)
1832 err(_("duplicate changeset at revision %d") % i)
1831 seen[n] = 1
1833 seen[n] = 1
1832
1834
1833 for p in self.changelog.parents(n):
1835 for p in self.changelog.parents(n):
1834 if p not in self.changelog.nodemap:
1836 if p not in self.changelog.nodemap:
1835 err(_("changeset %s has unknown parent %s") %
1837 err(_("changeset %s has unknown parent %s") %
1836 (short(n), short(p)))
1838 (short(n), short(p)))
1837 try:
1839 try:
1838 changes = self.changelog.read(n)
1840 changes = self.changelog.read(n)
1839 except KeyboardInterrupt:
1841 except KeyboardInterrupt:
1840 self.ui.warn(_("interrupted"))
1842 self.ui.warn(_("interrupted"))
1841 raise
1843 raise
1842 except Exception, inst:
1844 except Exception, inst:
1843 err(_("unpacking changeset %s: %s") % (short(n), inst))
1845 err(_("unpacking changeset %s: %s") % (short(n), inst))
1844 continue
1846 continue
1845
1847
1846 neededmanifests[changes[0]] = n
1848 neededmanifests[changes[0]] = n
1847
1849
1848 for f in changes[3]:
1850 for f in changes[3]:
1849 filelinkrevs.setdefault(f, []).append(i)
1851 filelinkrevs.setdefault(f, []).append(i)
1850
1852
1851 seen = {}
1853 seen = {}
1852 self.ui.status(_("checking manifests\n"))
1854 self.ui.status(_("checking manifests\n"))
1853 checksize(self.manifest, "manifest")
1855 checksize(self.manifest, "manifest")
1854
1856
1855 for i in range(self.manifest.count()):
1857 for i in range(self.manifest.count()):
1856 n = self.manifest.node(i)
1858 n = self.manifest.node(i)
1857 l = self.manifest.linkrev(n)
1859 l = self.manifest.linkrev(n)
1858
1860
1859 if l < 0 or l >= self.changelog.count():
1861 if l < 0 or l >= self.changelog.count():
1860 err(_("bad manifest link (%d) at revision %d") % (l, i))
1862 err(_("bad manifest link (%d) at revision %d") % (l, i))
1861
1863
1862 if n in neededmanifests:
1864 if n in neededmanifests:
1863 del neededmanifests[n]
1865 del neededmanifests[n]
1864
1866
1865 if n in seen:
1867 if n in seen:
1866 err(_("duplicate manifest at revision %d") % i)
1868 err(_("duplicate manifest at revision %d") % i)
1867
1869
1868 seen[n] = 1
1870 seen[n] = 1
1869
1871
1870 for p in self.manifest.parents(n):
1872 for p in self.manifest.parents(n):
1871 if p not in self.manifest.nodemap:
1873 if p not in self.manifest.nodemap:
1872 err(_("manifest %s has unknown parent %s") %
1874 err(_("manifest %s has unknown parent %s") %
1873 (short(n), short(p)))
1875 (short(n), short(p)))
1874
1876
1875 try:
1877 try:
1876 delta = mdiff.patchtext(self.manifest.delta(n))
1878 delta = mdiff.patchtext(self.manifest.delta(n))
1877 except KeyboardInterrupt:
1879 except KeyboardInterrupt:
1878 self.ui.warn(_("interrupted"))
1880 self.ui.warn(_("interrupted"))
1879 raise
1881 raise
1880 except Exception, inst:
1882 except Exception, inst:
1881 err(_("unpacking manifest %s: %s") % (short(n), inst))
1883 err(_("unpacking manifest %s: %s") % (short(n), inst))
1882 continue
1884 continue
1883
1885
1884 try:
1886 try:
1885 ff = [ l.split('\0') for l in delta.splitlines() ]
1887 ff = [ l.split('\0') for l in delta.splitlines() ]
1886 for f, fn in ff:
1888 for f, fn in ff:
1887 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1889 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1888 except (ValueError, TypeError), inst:
1890 except (ValueError, TypeError), inst:
1889 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1891 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1890
1892
1891 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1893 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1892
1894
1893 for m, c in neededmanifests.items():
1895 for m, c in neededmanifests.items():
1894 err(_("Changeset %s refers to unknown manifest %s") %
1896 err(_("Changeset %s refers to unknown manifest %s") %
1895 (short(m), short(c)))
1897 (short(m), short(c)))
1896 del neededmanifests
1898 del neededmanifests
1897
1899
1898 for f in filenodes:
1900 for f in filenodes:
1899 if f not in filelinkrevs:
1901 if f not in filelinkrevs:
1900 err(_("file %s in manifest but not in changesets") % f)
1902 err(_("file %s in manifest but not in changesets") % f)
1901
1903
1902 for f in filelinkrevs:
1904 for f in filelinkrevs:
1903 if f not in filenodes:
1905 if f not in filenodes:
1904 err(_("file %s in changeset but not in manifest") % f)
1906 err(_("file %s in changeset but not in manifest") % f)
1905
1907
1906 self.ui.status(_("checking files\n"))
1908 self.ui.status(_("checking files\n"))
1907 ff = filenodes.keys()
1909 ff = filenodes.keys()
1908 ff.sort()
1910 ff.sort()
1909 for f in ff:
1911 for f in ff:
1910 if f == "/dev/null":
1912 if f == "/dev/null":
1911 continue
1913 continue
1912 files += 1
1914 files += 1
1913 if not f:
1915 if not f:
1914 err(_("file without name in manifest %s") % short(n))
1916 err(_("file without name in manifest %s") % short(n))
1915 continue
1917 continue
1916 fl = self.file(f)
1918 fl = self.file(f)
1917 checksize(fl, f)
1919 checksize(fl, f)
1918
1920
1919 nodes = {nullid: 1}
1921 nodes = {nullid: 1}
1920 seen = {}
1922 seen = {}
1921 for i in range(fl.count()):
1923 for i in range(fl.count()):
1922 revisions += 1
1924 revisions += 1
1923 n = fl.node(i)
1925 n = fl.node(i)
1924
1926
1925 if n in seen:
1927 if n in seen:
1926 err(_("%s: duplicate revision %d") % (f, i))
1928 err(_("%s: duplicate revision %d") % (f, i))
1927 if n not in filenodes[f]:
1929 if n not in filenodes[f]:
1928 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1930 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1929 else:
1931 else:
1930 del filenodes[f][n]
1932 del filenodes[f][n]
1931
1933
1932 flr = fl.linkrev(n)
1934 flr = fl.linkrev(n)
1933 if flr not in filelinkrevs.get(f, []):
1935 if flr not in filelinkrevs.get(f, []):
1934 err(_("%s:%s points to unexpected changeset %d")
1936 err(_("%s:%s points to unexpected changeset %d")
1935 % (f, short(n), flr))
1937 % (f, short(n), flr))
1936 else:
1938 else:
1937 filelinkrevs[f].remove(flr)
1939 filelinkrevs[f].remove(flr)
1938
1940
1939 # verify contents
1941 # verify contents
1940 try:
1942 try:
1941 t = fl.read(n)
1943 t = fl.read(n)
1942 except KeyboardInterrupt:
1944 except KeyboardInterrupt:
1943 self.ui.warn(_("interrupted"))
1945 self.ui.warn(_("interrupted"))
1944 raise
1946 raise
1945 except Exception, inst:
1947 except Exception, inst:
1946 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1948 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1947
1949
1948 # verify parents
1950 # verify parents
1949 (p1, p2) = fl.parents(n)
1951 (p1, p2) = fl.parents(n)
1950 if p1 not in nodes:
1952 if p1 not in nodes:
1951 err(_("file %s:%s unknown parent 1 %s") %
1953 err(_("file %s:%s unknown parent 1 %s") %
1952 (f, short(n), short(p1)))
1954 (f, short(n), short(p1)))
1953 if p2 not in nodes:
1955 if p2 not in nodes:
1954 err(_("file %s:%s unknown parent 2 %s") %
1956 err(_("file %s:%s unknown parent 2 %s") %
1955 (f, short(n), short(p1)))
1957 (f, short(n), short(p1)))
1956 nodes[n] = 1
1958 nodes[n] = 1
1957
1959
1958 # cross-check
1960 # cross-check
1959 for node in filenodes[f]:
1961 for node in filenodes[f]:
1960 err(_("node %s in manifests not in %s") % (hex(node), f))
1962 err(_("node %s in manifests not in %s") % (hex(node), f))
1961
1963
1962 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1964 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1963 (files, changesets, revisions))
1965 (files, changesets, revisions))
1964
1966
1965 if errors[0]:
1967 if errors[0]:
1966 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1968 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1967 return 1
1969 return 1
1968
1970
1969 # used to avoid circular references so destructors work
1971 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that promotes journal files to undo files.

    The callback renames base/journal -> base/undo and
    base/journal.dirstate -> base/undo.dirstate.  Defined at module
    level (rather than as a method) so the returned closure holds no
    reference back to the repository object.
    """
    journaldir = base
    def renamer():
        util.rename(os.path.join(journaldir, "journal"),
                    os.path.join(journaldir, "undo"))
        util.rename(os.path.join(journaldir, "journal.dirstate"),
                    os.path.join(journaldir, "undo.dirstate"))
    return renamer
1977
1979
General Comments 0
You need to be logged in to leave comments. Login now