##// END OF EJS Templates
fix update when a locally added file matches the target revision...
Benoit Boissinot -
r2065:2ff37e3b default
parent child Browse files
Show More
@@ -1,1956 +1,1957 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "appendfile changegroup")
15
15
16 class localrepository(object):
16 class localrepository(object):
    def __del__(self):
        # Drop the transaction handle at teardown so the repository object
        # does not keep a reference to an open transaction.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        If *path* is None, walk upward from the current directory until a
        directory containing ".hg" is found.  Raises repo.RepoError when no
        repository can be located, or when *path* has no ".hg" directory and
        *create* is false.
        """
        if not path:
            # locate the repository by searching for .hg upward from cwd
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path         # path exactly as the caller gave it
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None        # lazily built by tags()
        self.nodetagscache = None    # lazily built by nodetags()
        self.encodepats = None       # lazily built by wread()
        self.decodepats = None       # lazily built by wwrite()
        self.transhandle = None      # current transaction, if any

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            # per-repository configuration is optional
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass
56 def hook(self, name, throw=False, **args):
56 def hook(self, name, throw=False, **args):
57 def runhook(name, cmd):
57 def runhook(name, cmd):
58 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
58 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
59 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
59 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
60 [(k.upper(), v) for k, v in args.iteritems()])
60 [(k.upper(), v) for k, v in args.iteritems()])
61 r = util.system(cmd, environ=env, cwd=self.root)
61 r = util.system(cmd, environ=env, cwd=self.root)
62 if r:
62 if r:
63 desc, r = util.explain_exit(r)
63 desc, r = util.explain_exit(r)
64 if throw:
64 if throw:
65 raise util.Abort(_('%s hook %s') % (name, desc))
65 raise util.Abort(_('%s hook %s') % (name, desc))
66 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
66 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
67 return False
67 return False
68 return True
68 return True
69
69
70 r = True
70 r = True
71 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
71 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
72 if hname.split(".", 1)[0] == name and cmd]
72 if hname.split(".", 1)[0] == name and cmd]
73 hooks.sort()
73 hooks.sort()
74 for hname, cmd in hooks:
74 for hname, cmd in hooks:
75 r = runhook(hname, cmd) and r
75 r = runhook(hname, cmd) and r
76 return r
76 return r
77
77
78 def tags(self):
78 def tags(self):
79 '''return a mapping of tag to node'''
79 '''return a mapping of tag to node'''
80 if not self.tagscache:
80 if not self.tagscache:
81 self.tagscache = {}
81 self.tagscache = {}
82
82
83 def parsetag(line, context):
83 def parsetag(line, context):
84 if not line:
84 if not line:
85 return
85 return
86 s = l.split(" ", 1)
86 s = l.split(" ", 1)
87 if len(s) != 2:
87 if len(s) != 2:
88 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
88 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
89 return
89 return
90 node, key = s
90 node, key = s
91 try:
91 try:
92 bin_n = bin(node)
92 bin_n = bin(node)
93 except TypeError:
93 except TypeError:
94 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
94 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
95 return
95 return
96 if bin_n not in self.changelog.nodemap:
96 if bin_n not in self.changelog.nodemap:
97 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
97 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
98 return
98 return
99 self.tagscache[key.strip()] = bin_n
99 self.tagscache[key.strip()] = bin_n
100
100
101 # read each head of the tags file, ending with the tip
101 # read each head of the tags file, ending with the tip
102 # and add each tag found to the map, with "newer" ones
102 # and add each tag found to the map, with "newer" ones
103 # taking precedence
103 # taking precedence
104 fl = self.file(".hgtags")
104 fl = self.file(".hgtags")
105 h = fl.heads()
105 h = fl.heads()
106 h.reverse()
106 h.reverse()
107 for r in h:
107 for r in h:
108 count = 0
108 count = 0
109 for l in fl.read(r).splitlines():
109 for l in fl.read(r).splitlines():
110 count += 1
110 count += 1
111 parsetag(l, ".hgtags:%d" % count)
111 parsetag(l, ".hgtags:%d" % count)
112
112
113 try:
113 try:
114 f = self.opener("localtags")
114 f = self.opener("localtags")
115 count = 0
115 count = 0
116 for l in f:
116 for l in f:
117 count += 1
117 count += 1
118 parsetag(l, "localtags:%d" % count)
118 parsetag(l, "localtags:%d" % count)
119 except IOError:
119 except IOError:
120 pass
120 pass
121
121
122 self.tagscache['tip'] = self.changelog.tip()
122 self.tagscache['tip'] = self.changelog.tip()
123
123
124 return self.tagscache
124 return self.tagscache
125
125
126 def tagslist(self):
126 def tagslist(self):
127 '''return a list of tags ordered by revision'''
127 '''return a list of tags ordered by revision'''
128 l = []
128 l = []
129 for t, n in self.tags().items():
129 for t, n in self.tags().items():
130 try:
130 try:
131 r = self.changelog.rev(n)
131 r = self.changelog.rev(n)
132 except:
132 except:
133 r = -2 # sort to the beginning of the list if unknown
133 r = -2 # sort to the beginning of the list if unknown
134 l.append((r, t, n))
134 l.append((r, t, n))
135 l.sort()
135 l.sort()
136 return [(t, n) for r, t, n in l]
136 return [(t, n) for r, t, n in l]
137
137
138 def nodetags(self, node):
138 def nodetags(self, node):
139 '''return the tags associated with a node'''
139 '''return the tags associated with a node'''
140 if not self.nodetagscache:
140 if not self.nodetagscache:
141 self.nodetagscache = {}
141 self.nodetagscache = {}
142 for t, n in self.tags().items():
142 for t, n in self.tags().items():
143 self.nodetagscache.setdefault(n, []).append(t)
143 self.nodetagscache.setdefault(n, []).append(t)
144 return self.nodetagscache.get(node, [])
144 return self.nodetagscache.get(node, [])
145
145
146 def lookup(self, key):
146 def lookup(self, key):
147 try:
147 try:
148 return self.tags()[key]
148 return self.tags()[key]
149 except KeyError:
149 except KeyError:
150 try:
150 try:
151 return self.changelog.lookup(key)
151 return self.changelog.lookup(key)
152 except:
152 except:
153 raise repo.RepoError(_("unknown revision '%s'") % key)
153 raise repo.RepoError(_("unknown revision '%s'") % key)
154
154
155 def dev(self):
155 def dev(self):
156 return os.stat(self.path).st_dev
156 return os.stat(self.path).st_dev
157
157
    def local(self):
        # This is a local, filesystem-backed repository (as opposed to a
        # remote repository object reached over HTTP/SSH).
        return True
161 def join(self, f):
161 def join(self, f):
162 return os.path.join(self.path, f)
162 return os.path.join(self.path, f)
163
163
164 def wjoin(self, f):
164 def wjoin(self, f):
165 return os.path.join(self.root, f)
165 return os.path.join(self.root, f)
166
166
167 def file(self, f):
167 def file(self, f):
168 if f[0] == '/':
168 if f[0] == '/':
169 f = f[1:]
169 f = f[1:]
170 return filelog.filelog(self.opener, f)
170 return filelog.filelog(self.opener, f)
171
171
    def getcwd(self):
        # Current working directory as the dirstate reports it
        # (relative to the repository root).
        return self.dirstate.getcwd()
    def wfile(self, f, mode='r'):
        # Open file *f* from the working directory with the given mode.
        return self.wopener(f, mode)
178 def wread(self, filename):
178 def wread(self, filename):
179 if self.encodepats == None:
179 if self.encodepats == None:
180 l = []
180 l = []
181 for pat, cmd in self.ui.configitems("encode"):
181 for pat, cmd in self.ui.configitems("encode"):
182 mf = util.matcher(self.root, "", [pat], [], [])[1]
182 mf = util.matcher(self.root, "", [pat], [], [])[1]
183 l.append((mf, cmd))
183 l.append((mf, cmd))
184 self.encodepats = l
184 self.encodepats = l
185
185
186 data = self.wopener(filename, 'r').read()
186 data = self.wopener(filename, 'r').read()
187
187
188 for mf, cmd in self.encodepats:
188 for mf, cmd in self.encodepats:
189 if mf(filename):
189 if mf(filename):
190 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
190 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
191 data = util.filter(data, cmd)
191 data = util.filter(data, cmd)
192 break
192 break
193
193
194 return data
194 return data
195
195
196 def wwrite(self, filename, data, fd=None):
196 def wwrite(self, filename, data, fd=None):
197 if self.decodepats == None:
197 if self.decodepats == None:
198 l = []
198 l = []
199 for pat, cmd in self.ui.configitems("decode"):
199 for pat, cmd in self.ui.configitems("decode"):
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
201 l.append((mf, cmd))
201 l.append((mf, cmd))
202 self.decodepats = l
202 self.decodepats = l
203
203
204 for mf, cmd in self.decodepats:
204 for mf, cmd in self.decodepats:
205 if mf(filename):
205 if mf(filename):
206 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
206 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
207 data = util.filter(data, cmd)
207 data = util.filter(data, cmd)
208 break
208 break
209
209
210 if fd:
210 if fd:
211 return fd.write(data)
211 return fd.write(data)
212 return self.wopener(filename, 'w').write(data)
212 return self.wopener(filename, 'w').write(data)
213
213
    def transaction(self):
        """Return a transaction handle for this repository.

        If a transaction is already running, a nested handle on it is
        returned instead of starting a new one.  Otherwise the current
        dirstate is journaled (for undo) and a fresh journal-backed
        transaction is created and remembered in self.transhandle.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # join the transaction already in progress
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet (e.g. a brand-new repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
    def recover(self):
        """Roll back an interrupted transaction, if one is pending.

        Returns True when a journal file was found and rolled back,
        False otherwise.  Takes the repository lock for the duration.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # drop cached metadata now that the store changed on disk
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    def undo(self, wlock=None):
        """Roll back the last committed transaction.

        Restores the store from the "undo" journal and the dirstate from
        its saved copy, then reloads the in-memory state.  An existing
        working-directory lock may be passed in via *wlock*; otherwise
        one is acquired here.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store metadata and the dirstate
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
    def wreload(self):
        # Re-read the dirstate (working directory state) from disk.
        self.dirstate.read()
259 def reload(self):
259 def reload(self):
260 self.changelog.load()
260 self.changelog.load()
261 self.manifest.load()
261 self.manifest.load()
262 self.tagscache = None
262 self.tagscache = None
263 self.nodetagscache = None
263 self.nodetagscache = None
264
264
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file named *lockname* under .hg.

        A non-blocking acquire is attempted first; if the lock is held
        elsewhere and *wait* is true, the acquire is retried with a
        timeout taken from ui.timeout (default 600 seconds), otherwise
        the LockHeld exception propagates.  *releasefn* runs when the
        lock is released, *acquirefn* immediately after acquisition.
        Returns the lock object (the lock is held while it is alive).
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
    def lock(self, wait=1):
        # Repository (store) lock; cached metadata is reloaded on
        # acquisition so we see other writers' changes.
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
    def wlock(self, wait=1):
        # Working-directory lock; the dirstate is written back when the
        # lock is released and re-read when it is acquired.
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
291 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
291 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
292 "determine whether a new filenode is needed"
292 "determine whether a new filenode is needed"
293 fp1 = manifest1.get(filename, nullid)
293 fp1 = manifest1.get(filename, nullid)
294 fp2 = manifest2.get(filename, nullid)
294 fp2 = manifest2.get(filename, nullid)
295
295
296 if fp2 != nullid:
296 if fp2 != nullid:
297 # is one parent an ancestor of the other?
297 # is one parent an ancestor of the other?
298 fpa = filelog.ancestor(fp1, fp2)
298 fpa = filelog.ancestor(fp1, fp2)
299 if fpa == fp1:
299 if fpa == fp1:
300 fp1, fp2 = fp2, nullid
300 fp1, fp2 = fp2, nullid
301 elif fpa == fp2:
301 elif fpa == fp2:
302 fp2 = nullid
302 fp2 = nullid
303
303
304 # is the file unmodified from the parent? report existing entry
304 # is the file unmodified from the parent? report existing entry
305 if fp2 == nullid and text == filelog.read(fp1):
305 if fp2 == nullid and text == filelog.read(fp1):
306 return (fp1, None, None)
306 return (fp1, None, None)
307
307
308 return (None, fp1, fp2)
308 return (None, fp1, fp2)
309
309
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit *files* without consulting the working-dir status.

        Parents may be supplied explicitly (defaulting to the dirstate
        parents); the dirstate is only updated when the first parent is
        also the dirstate's current first parent.  Files that cannot be
        read from the working directory are treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse its filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: record as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit working-directory changes; return the new node.

        With *files*, only those files are considered (classified by
        dirstate status); otherwise everything self.changes() reports
        (restricted by *match*) is committed.  Returns None when there
        is nothing to commit (unless *force* or a merge is in progress)
        or when the user aborts with an empty commit message.  Runs the
        precommit, pretxncommit and commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify each file by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) may legitimately record no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its filenode in the metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse its filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # no message supplied: build a template and run the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # an empty message aborts the commit
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
486 def walk(self, node=None, files=[], match=util.always, badmatch=None):
486 def walk(self, node=None, files=[], match=util.always, badmatch=None):
487 if node:
487 if node:
488 fdict = dict.fromkeys(files)
488 fdict = dict.fromkeys(files)
489 for fn in self.manifest.read(self.changelog.read(node)[0]):
489 for fn in self.manifest.read(self.changelog.read(node)[0]):
490 fdict.pop(fn, None)
490 fdict.pop(fn, None)
491 if match(fn):
491 if match(fn):
492 yield 'm', fn
492 yield 'm', fn
493 for fn in fdict:
493 for fn in fdict:
494 if badmatch and badmatch(fn):
494 if badmatch and badmatch(fn):
495 if match(fn):
495 if match(fn):
496 yield 'b', fn
496 yield 'b', fn
497 else:
497 else:
498 self.ui.warn(_('%s: No such file in rev %s\n') % (
498 self.ui.warn(_('%s: No such file in rev %s\n') % (
499 util.pathto(self.getcwd(), fn), short(node)))
499 util.pathto(self.getcwd(), fn), short(node)))
500 else:
500 else:
501 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
501 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
502 yield src, fn
502 yield src, fn
503
503
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown) sorted
        lists, plus an ignored list when show_ignored is not None.
        """

        def fcmp(fn, mf):
            # compare working-directory contents with the stored revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of *node*, restricted to files accepted by match()
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # can't lock: proceed read-only (no dirstate fixups)
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents are clean: record that in the
                            # dirstate, but only while holding the wlock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
587 def add(self, list, wlock=None):
587 def add(self, list, wlock=None):
588 if not wlock:
588 if not wlock:
589 wlock = self.wlock()
589 wlock = self.wlock()
590 for f in list:
590 for f in list:
591 p = self.wjoin(f)
591 p = self.wjoin(f)
592 if not os.path.exists(p):
592 if not os.path.exists(p):
593 self.ui.warn(_("%s does not exist!\n") % f)
593 self.ui.warn(_("%s does not exist!\n") % f)
594 elif not os.path.isfile(p):
594 elif not os.path.isfile(p):
595 self.ui.warn(_("%s not added: only files supported currently\n")
595 self.ui.warn(_("%s not added: only files supported currently\n")
596 % f)
596 % f)
597 elif self.dirstate.state(f) in 'an':
597 elif self.dirstate.state(f) in 'an':
598 self.ui.warn(_("%s already tracked!\n") % f)
598 self.ui.warn(_("%s already tracked!\n") % f)
599 else:
599 else:
600 self.dirstate.update([f], "a")
600 self.dirstate.update([f], "a")
601
601
602 def forget(self, list, wlock=None):
602 def forget(self, list, wlock=None):
603 if not wlock:
603 if not wlock:
604 wlock = self.wlock()
604 wlock = self.wlock()
605 for f in list:
605 for f in list:
606 if self.dirstate.state(f) not in 'ai':
606 if self.dirstate.state(f) not in 'ai':
607 self.ui.warn(_("%s not added!\n") % f)
607 self.ui.warn(_("%s not added!\n") % f)
608 else:
608 else:
609 self.dirstate.forget([f])
609 self.dirstate.forget([f])
610
610
611 def remove(self, list, unlink=False, wlock=None):
611 def remove(self, list, unlink=False, wlock=None):
612 if unlink:
612 if unlink:
613 for f in list:
613 for f in list:
614 try:
614 try:
615 util.unlink(self.wjoin(f))
615 util.unlink(self.wjoin(f))
616 except OSError, inst:
616 except OSError, inst:
617 if inst.errno != errno.ENOENT:
617 if inst.errno != errno.ENOENT:
618 raise
618 raise
619 if not wlock:
619 if not wlock:
620 wlock = self.wlock()
620 wlock = self.wlock()
621 for f in list:
621 for f in list:
622 p = self.wjoin(f)
622 p = self.wjoin(f)
623 if os.path.exists(p):
623 if os.path.exists(p):
624 self.ui.warn(_("%s still exists!\n") % f)
624 self.ui.warn(_("%s still exists!\n") % f)
625 elif self.dirstate.state(f) == 'a':
625 elif self.dirstate.state(f) == 'a':
626 self.dirstate.forget([f])
626 self.dirstate.forget([f])
627 elif f not in self.dirstate:
627 elif f not in self.dirstate:
628 self.ui.warn(_("%s not tracked!\n") % f)
628 self.ui.warn(_("%s not tracked!\n") % f)
629 else:
629 else:
630 self.dirstate.update([f], "r")
630 self.dirstate.update([f], "r")
631
631
632 def undelete(self, list, wlock=None):
632 def undelete(self, list, wlock=None):
633 p = self.dirstate.parents()[0]
633 p = self.dirstate.parents()[0]
634 mn = self.changelog.read(p)[0]
634 mn = self.changelog.read(p)[0]
635 mf = self.manifest.readflags(mn)
635 mf = self.manifest.readflags(mn)
636 m = self.manifest.read(mn)
636 m = self.manifest.read(mn)
637 if not wlock:
637 if not wlock:
638 wlock = self.wlock()
638 wlock = self.wlock()
639 for f in list:
639 for f in list:
640 if self.dirstate.state(f) not in "r":
640 if self.dirstate.state(f) not in "r":
641 self.ui.warn("%s not removed!\n" % f)
641 self.ui.warn("%s not removed!\n" % f)
642 else:
642 else:
643 t = self.file(f).read(m[f])
643 t = self.file(f).read(m[f])
644 self.wwrite(f, t)
644 self.wwrite(f, t)
645 util.set_exec(self.wjoin(f), mf[f])
645 util.set_exec(self.wjoin(f), mf[f])
646 self.dirstate.update([f], "n")
646 self.dirstate.update([f], "n")
647
647
648 def copy(self, source, dest, wlock=None):
648 def copy(self, source, dest, wlock=None):
649 p = self.wjoin(dest)
649 p = self.wjoin(dest)
650 if not os.path.exists(p):
650 if not os.path.exists(p):
651 self.ui.warn(_("%s does not exist!\n") % dest)
651 self.ui.warn(_("%s does not exist!\n") % dest)
652 elif not os.path.isfile(p):
652 elif not os.path.isfile(p):
653 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
653 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
654 else:
654 else:
655 if not wlock:
655 if not wlock:
656 wlock = self.wlock()
656 wlock = self.wlock()
657 if self.dirstate.state(dest) == '?':
657 if self.dirstate.state(dest) == '?':
658 self.dirstate.update([dest], "a")
658 self.dirstate.update([dest], "a")
659 self.dirstate.copy(source, dest)
659 self.dirstate.copy(source, dest)
660
660
661 def heads(self, start=None):
661 def heads(self, start=None):
662 heads = self.changelog.heads(start)
662 heads = self.changelog.heads(start)
663 # sort the output in rev descending order
663 # sort the output in rev descending order
664 heads = [(-self.changelog.rev(h), h) for h in heads]
664 heads = [(-self.changelog.rev(h), h) for h in heads]
665 heads.sort()
665 heads.sort()
666 return [n for (r, n) in heads]
666 return [n for (r, n) in heads]
667
667
668 # branchlookup returns a dict giving a list of branches for
668 # branchlookup returns a dict giving a list of branches for
669 # each head. A branch is defined as the tag of a node or
669 # each head. A branch is defined as the tag of a node or
670 # the branch of the node's parents. If a node has multiple
670 # the branch of the node's parents. If a node has multiple
671 # branch tags, tags are eliminated if they are visible from other
671 # branch tags, tags are eliminated if they are visible from other
672 # branch tags.
672 # branch tags.
673 #
673 #
674 # So, for this graph: a->b->c->d->e
674 # So, for this graph: a->b->c->d->e
675 # \ /
675 # \ /
676 # aa -----/
676 # aa -----/
677 # a has tag 2.6.12
677 # a has tag 2.6.12
678 # d has tag 2.6.13
678 # d has tag 2.6.13
679 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
679 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
680 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
680 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
681 # from the list.
681 # from the list.
682 #
682 #
683 # It is possible that more than one head will have the same branch tag.
683 # It is possible that more than one head will have the same branch tag.
684 # callers need to check the result for multiple heads under the same
684 # callers need to check the result for multiple heads under the same
685 # branch tag if that is a problem for them (ie checkout of a specific
685 # branch tag if that is a problem for them (ie checkout of a specific
686 # branch).
686 # branch).
687 #
687 #
688 # passing in a specific branch will limit the depth of the search
688 # passing in a specific branch will limit the depth of the search
689 # through the parents. It won't limit the branches returned in the
689 # through the parents. It won't limit the branches returned in the
690 # result though.
690 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (tags reachable from another tag of the same
        head are eliminated).

        heads defaults to self.heads(); passing branch stops the
        traversal once that tag is reached, limiting search depth but
        not the set of branches returned.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> {tagged node visible from it: 1}
        merges = []         # pending (second parent, found-so-far) work items
        seenmerge = {}
        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume at a merge's second parent, keeping the tags
                # already found on the way down
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                # NOTE: 'seen' is reset only here, per head — merge
                # work items deliberately share the current head's set
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node found so far (and from itself)
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # requested branch reached: stop descending here
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent with a snapshot of 'found'
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized transitive closure over the branches dict
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
773
773
774 def branches(self, nodes):
774 def branches(self, nodes):
775 if not nodes:
775 if not nodes:
776 nodes = [self.changelog.tip()]
776 nodes = [self.changelog.tip()]
777 b = []
777 b = []
778 for n in nodes:
778 for n in nodes:
779 t = n
779 t = n
780 while n:
780 while n:
781 p = self.changelog.parents(n)
781 p = self.changelog.parents(n)
782 if p[1] != nullid or p[0] == nullid:
782 if p[1] != nullid or p[0] == nullid:
783 b.append((t, n, p[0], p[1]))
783 b.append((t, n, p[0], p[1]))
784 break
784 break
785 n = p[0]
785 n = p[0]
786 return b
786 return b
787
787
788 def between(self, pairs):
788 def between(self, pairs):
789 r = []
789 r = []
790
790
791 for top, bottom in pairs:
791 for top, bottom in pairs:
792 n, l, i = top, [], 0
792 n, l, i = top, [], 0
793 f = 1
793 f = 1
794
794
795 while n != bottom:
795 while n != bottom:
796 p = self.changelog.parents(n)[0]
796 p = self.changelog.parents(n)[0]
797 if i == f:
797 if i == f:
798 l.append(n)
798 l.append(n)
799 f = f * 2
799 f = f * 2
800 n = p
800 n = p
801 i += 1
801 i += 1
802
802
803 r.append(l)
803 r.append(l)
804
804
805 return r
805 return r
806
806
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return the list of root nodes of the changesets missing
        locally, discovered by querying remote.

        base, when given, is a dict filled IN PLACE with nodes known to
        be common to both repositories (callers such as push rely on
        this side effect).  heads, when given, is used instead of
        asking remote for its heads.  With force, unrelated
        repositories produce a warning instead of aborting.
        """
        m = self.changelog.nodemap
        search = []          # branch ranges to binary-search later
        fetch = {}           # earliest unknown nodes (the result set)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        # split remote heads into ones we already have (common) and
        # ones we have never seen
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        rep = {}             # parents already requested from remote
        reqcnt = 0           # number of round trips, for debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # otherwise ask remote about the (unseen) parents
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the branches queries ten nodes at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # adjacent in the skip list: boundary located
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the (p, i) sub-range and retry
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
931
931
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        # NOTE(review): '== None' kept as-is; 'is None' would be the
        # usual spelling but behavior is identical here
        if base == None:
            base = {}
            # discovery is only run when the caller did not supply a
            # pre-computed common-node dict
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            # two-element return only in the heads case, matching the
            # docstring's contract
            return subset, updated_heads.keys()
        else:
            return subset
979
979
980 def pull(self, remote, heads=None, force=False):
980 def pull(self, remote, heads=None, force=False):
981 l = self.lock()
981 l = self.lock()
982
982
983 # if we have an empty repo, fetch everything
983 # if we have an empty repo, fetch everything
984 if self.changelog.tip() == nullid:
984 if self.changelog.tip() == nullid:
985 self.ui.status(_("requesting all changes\n"))
985 self.ui.status(_("requesting all changes\n"))
986 fetch = [nullid]
986 fetch = [nullid]
987 else:
987 else:
988 fetch = self.findincoming(remote, force=force)
988 fetch = self.findincoming(remote, force=force)
989
989
990 if not fetch:
990 if not fetch:
991 self.ui.status(_("no changes found\n"))
991 self.ui.status(_("no changes found\n"))
992 return 0
992 return 0
993
993
994 if heads is None:
994 if heads is None:
995 cg = remote.changegroup(fetch, 'pull')
995 cg = remote.changegroup(fetch, 'pull')
996 else:
996 else:
997 cg = remote.changegroupsubset(fetch, heads, 'pull')
997 cg = remote.changegroupsubset(fetch, heads, 'pull')
998 return self.addchangegroup(cg)
998 return self.addchangegroup(cg)
999
999
    def push(self, remote, force=False, revs=None):
        """Push local changesets to remote.

        Returns 1 (without pushing) when the remote has changes we do
        not know about, or when the push would create new remote
        heads, unless force is set.  revs, when given, limits the
        outgoing set to ancestors of those revisions.  Otherwise
        returns the result of the remote addchangegroup call.
        """
        lock = remote.lock()

        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' in place with common nodes
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            if revs is not None:
                # recompute which remote heads get children when only a
                # subset of revisions is being pushed
                updated_heads = {}
                # NOTE(review): this loop reuses the name 'base',
                # shadowing the common-nodes dict above (the dict is
                # not needed afterwards, but the reuse is confusing)
                for base in msng_cl:
                    for parent in self.changelog.parents(base):
                        if parent in remote_heads:
                            updated_heads[parent] = True
                updated_heads = updated_heads.keys()
            # fewer updated remote heads than local heads means the
            # push would create a brand-new remote branch head
            if len(updated_heads) < len(heads):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)
1040
1040
1041 def changegroupsubset(self, bases, heads, source):
1041 def changegroupsubset(self, bases, heads, source):
1042 """This function generates a changegroup consisting of all the nodes
1042 """This function generates a changegroup consisting of all the nodes
1043 that are descendents of any of the bases, and ancestors of any of
1043 that are descendents of any of the bases, and ancestors of any of
1044 the heads.
1044 the heads.
1045
1045
1046 It is fairly complex as determining which filenodes and which
1046 It is fairly complex as determining which filenodes and which
1047 manifest nodes need to be included for the changeset to be complete
1047 manifest nodes need to be included for the changeset to be complete
1048 is non-trivial.
1048 is non-trivial.
1049
1049
1050 Another wrinkle is doing the reverse, figuring out which changeset in
1050 Another wrinkle is doing the reverse, figuring out which changeset in
1051 the changegroup a particular filenode or manifestnode belongs to."""
1051 the changegroup a particular filenode or manifestnode belongs to."""
1052
1052
1053 self.hook('preoutgoing', throw=True, source=source)
1053 self.hook('preoutgoing', throw=True, source=source)
1054
1054
1055 # Set up some initial variables
1055 # Set up some initial variables
1056 # Make it easy to refer to self.changelog
1056 # Make it easy to refer to self.changelog
1057 cl = self.changelog
1057 cl = self.changelog
1058 # msng is short for missing - compute the list of changesets in this
1058 # msng is short for missing - compute the list of changesets in this
1059 # changegroup.
1059 # changegroup.
1060 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1060 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1061 # Some bases may turn out to be superfluous, and some heads may be
1061 # Some bases may turn out to be superfluous, and some heads may be
1062 # too. nodesbetween will return the minimal set of bases and heads
1062 # too. nodesbetween will return the minimal set of bases and heads
1063 # necessary to re-create the changegroup.
1063 # necessary to re-create the changegroup.
1064
1064
1065 # Known heads are the list of heads that it is assumed the recipient
1065 # Known heads are the list of heads that it is assumed the recipient
1066 # of this changegroup will know about.
1066 # of this changegroup will know about.
1067 knownheads = {}
1067 knownheads = {}
1068 # We assume that all parents of bases are known heads.
1068 # We assume that all parents of bases are known heads.
1069 for n in bases:
1069 for n in bases:
1070 for p in cl.parents(n):
1070 for p in cl.parents(n):
1071 if p != nullid:
1071 if p != nullid:
1072 knownheads[p] = 1
1072 knownheads[p] = 1
1073 knownheads = knownheads.keys()
1073 knownheads = knownheads.keys()
1074 if knownheads:
1074 if knownheads:
1075 # Now that we know what heads are known, we can compute which
1075 # Now that we know what heads are known, we can compute which
1076 # changesets are known. The recipient must know about all
1076 # changesets are known. The recipient must know about all
1077 # changesets required to reach the known heads from the null
1077 # changesets required to reach the known heads from the null
1078 # changeset.
1078 # changeset.
1079 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1079 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1080 junk = None
1080 junk = None
1081 # Transform the list into an ersatz set.
1081 # Transform the list into an ersatz set.
1082 has_cl_set = dict.fromkeys(has_cl_set)
1082 has_cl_set = dict.fromkeys(has_cl_set)
1083 else:
1083 else:
1084 # If there were no known heads, the recipient cannot be assumed to
1084 # If there were no known heads, the recipient cannot be assumed to
1085 # know about any changesets.
1085 # know about any changesets.
1086 has_cl_set = {}
1086 has_cl_set = {}
1087
1087
1088 # Make it easy to refer to self.manifest
1088 # Make it easy to refer to self.manifest
1089 mnfst = self.manifest
1089 mnfst = self.manifest
1090 # We don't know which manifests are missing yet
1090 # We don't know which manifests are missing yet
1091 msng_mnfst_set = {}
1091 msng_mnfst_set = {}
1092 # Nor do we know which filenodes are missing.
1092 # Nor do we know which filenodes are missing.
1093 msng_filenode_set = {}
1093 msng_filenode_set = {}
1094
1094
1095 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1095 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1096 junk = None
1096 junk = None
1097
1097
1098 # A changeset always belongs to itself, so the changenode lookup
1098 # A changeset always belongs to itself, so the changenode lookup
1099 # function for a changenode is identity.
1099 # function for a changenode is identity.
1100 def identity(x):
1100 def identity(x):
1101 return x
1101 return x
1102
1102
1103 # A function generating function. Sets up an environment for the
1103 # A function generating function. Sets up an environment for the
1104 # inner function.
1104 # inner function.
1105 def cmp_by_rev_func(revlog):
1105 def cmp_by_rev_func(revlog):
1106 # Compare two nodes by their revision number in the environment's
1106 # Compare two nodes by their revision number in the environment's
1107 # revision history. Since the revision number both represents the
1107 # revision history. Since the revision number both represents the
1108 # most efficient order to read the nodes in, and represents a
1108 # most efficient order to read the nodes in, and represents a
1109 # topological sorting of the nodes, this function is often useful.
1109 # topological sorting of the nodes, this function is often useful.
1110 def cmp_by_rev(a, b):
1110 def cmp_by_rev(a, b):
1111 return cmp(revlog.rev(a), revlog.rev(b))
1111 return cmp(revlog.rev(a), revlog.rev(b))
1112 return cmp_by_rev
1112 return cmp_by_rev
1113
1113
1114 # If we determine that a particular file or manifest node must be a
1114 # If we determine that a particular file or manifest node must be a
1115 # node that the recipient of the changegroup will already have, we can
1115 # node that the recipient of the changegroup will already have, we can
1116 # also assume the recipient will have all the parents. This function
1116 # also assume the recipient will have all the parents. This function
1117 # prunes them from the set of missing nodes.
1117 # prunes them from the set of missing nodes.
1118 def prune_parents(revlog, hasset, msngset):
1118 def prune_parents(revlog, hasset, msngset):
1119 haslst = hasset.keys()
1119 haslst = hasset.keys()
1120 haslst.sort(cmp_by_rev_func(revlog))
1120 haslst.sort(cmp_by_rev_func(revlog))
1121 for node in haslst:
1121 for node in haslst:
1122 parentlst = [p for p in revlog.parents(node) if p != nullid]
1122 parentlst = [p for p in revlog.parents(node) if p != nullid]
1123 while parentlst:
1123 while parentlst:
1124 n = parentlst.pop()
1124 n = parentlst.pop()
1125 if n not in hasset:
1125 if n not in hasset:
1126 hasset[n] = 1
1126 hasset[n] = 1
1127 p = [p for p in revlog.parents(n) if p != nullid]
1127 p = [p for p in revlog.parents(n) if p != nullid]
1128 parentlst.extend(p)
1128 parentlst.extend(p)
1129 for n in hasset:
1129 for n in hasset:
1130 msngset.pop(n, None)
1130 msngset.pop(n, None)
1131
1131
1132 # This is a function generating function used to set up an environment
1132 # This is a function generating function used to set up an environment
1133 # for the inner function to execute in.
1133 # for the inner function to execute in.
1134 def manifest_and_file_collector(changedfileset):
1134 def manifest_and_file_collector(changedfileset):
1135 # This is an information gathering function that gathers
1135 # This is an information gathering function that gathers
1136 # information from each changeset node that goes out as part of
1136 # information from each changeset node that goes out as part of
1137 # the changegroup. The information gathered is a list of which
1137 # the changegroup. The information gathered is a list of which
1138 # manifest nodes are potentially required (the recipient may
1138 # manifest nodes are potentially required (the recipient may
1139 # already have them) and total list of all files which were
1139 # already have them) and total list of all files which were
1140 # changed in any changeset in the changegroup.
1140 # changed in any changeset in the changegroup.
1141 #
1141 #
1142 # We also remember the first changenode we saw any manifest
1142 # We also remember the first changenode we saw any manifest
1143 # referenced by so we can later determine which changenode 'owns'
1143 # referenced by so we can later determine which changenode 'owns'
1144 # the manifest.
1144 # the manifest.
1145 def collect_manifests_and_files(clnode):
1145 def collect_manifests_and_files(clnode):
1146 c = cl.read(clnode)
1146 c = cl.read(clnode)
1147 for f in c[3]:
1147 for f in c[3]:
1148 # This is to make sure we only have one instance of each
1148 # This is to make sure we only have one instance of each
1149 # filename string for each filename.
1149 # filename string for each filename.
1150 changedfileset.setdefault(f, f)
1150 changedfileset.setdefault(f, f)
1151 msng_mnfst_set.setdefault(c[0], clnode)
1151 msng_mnfst_set.setdefault(c[0], clnode)
1152 return collect_manifests_and_files
1152 return collect_manifests_and_files
1153
1153
1154 # Figure out which manifest nodes (of the ones we think might be part
1154 # Figure out which manifest nodes (of the ones we think might be part
1155 # of the changegroup) the recipient must know about and remove them
1155 # of the changegroup) the recipient must know about and remove them
1156 # from the changegroup.
1156 # from the changegroup.
1157 def prune_manifests():
1157 def prune_manifests():
1158 has_mnfst_set = {}
1158 has_mnfst_set = {}
1159 for n in msng_mnfst_set:
1159 for n in msng_mnfst_set:
1160 # If a 'missing' manifest thinks it belongs to a changenode
1160 # If a 'missing' manifest thinks it belongs to a changenode
1161 # the recipient is assumed to have, obviously the recipient
1161 # the recipient is assumed to have, obviously the recipient
1162 # must have that manifest.
1162 # must have that manifest.
1163 linknode = cl.node(mnfst.linkrev(n))
1163 linknode = cl.node(mnfst.linkrev(n))
1164 if linknode in has_cl_set:
1164 if linknode in has_cl_set:
1165 has_mnfst_set[n] = 1
1165 has_mnfst_set[n] = 1
1166 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1166 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1167
1167
1168 # Use the information collected in collect_manifests_and_files to say
1168 # Use the information collected in collect_manifests_and_files to say
1169 # which changenode any manifestnode belongs to.
1169 # which changenode any manifestnode belongs to.
1170 def lookup_manifest_link(mnfstnode):
1170 def lookup_manifest_link(mnfstnode):
1171 return msng_mnfst_set[mnfstnode]
1171 return msng_mnfst_set[mnfstnode]
1172
1172
1173 # A function generating function that sets up the initial environment
1173 # A function generating function that sets up the initial environment
1174 # the inner function.
1174 # the inner function.
1175 def filenode_collector(changedfiles):
1175 def filenode_collector(changedfiles):
1176 next_rev = [0]
1176 next_rev = [0]
1177 # This gathers information from each manifestnode included in the
1177 # This gathers information from each manifestnode included in the
1178 # changegroup about which filenodes the manifest node references
1178 # changegroup about which filenodes the manifest node references
1179 # so we can include those in the changegroup too.
1179 # so we can include those in the changegroup too.
1180 #
1180 #
1181 # It also remembers which changenode each filenode belongs to. It
1181 # It also remembers which changenode each filenode belongs to. It
1182 # does this by assuming the a filenode belongs to the changenode
1182 # does this by assuming the a filenode belongs to the changenode
1183 # the first manifest that references it belongs to.
1183 # the first manifest that references it belongs to.
1184 def collect_msng_filenodes(mnfstnode):
1184 def collect_msng_filenodes(mnfstnode):
1185 r = mnfst.rev(mnfstnode)
1185 r = mnfst.rev(mnfstnode)
1186 if r == next_rev[0]:
1186 if r == next_rev[0]:
1187 # If the last rev we looked at was the one just previous,
1187 # If the last rev we looked at was the one just previous,
1188 # we only need to see a diff.
1188 # we only need to see a diff.
1189 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1189 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1190 # For each line in the delta
1190 # For each line in the delta
1191 for dline in delta.splitlines():
1191 for dline in delta.splitlines():
1192 # get the filename and filenode for that line
1192 # get the filename and filenode for that line
1193 f, fnode = dline.split('\0')
1193 f, fnode = dline.split('\0')
1194 fnode = bin(fnode[:40])
1194 fnode = bin(fnode[:40])
1195 f = changedfiles.get(f, None)
1195 f = changedfiles.get(f, None)
1196 # And if the file is in the list of files we care
1196 # And if the file is in the list of files we care
1197 # about.
1197 # about.
1198 if f is not None:
1198 if f is not None:
1199 # Get the changenode this manifest belongs to
1199 # Get the changenode this manifest belongs to
1200 clnode = msng_mnfst_set[mnfstnode]
1200 clnode = msng_mnfst_set[mnfstnode]
1201 # Create the set of filenodes for the file if
1201 # Create the set of filenodes for the file if
1202 # there isn't one already.
1202 # there isn't one already.
1203 ndset = msng_filenode_set.setdefault(f, {})
1203 ndset = msng_filenode_set.setdefault(f, {})
1204 # And set the filenode's changelog node to the
1204 # And set the filenode's changelog node to the
1205 # manifest's if it hasn't been set already.
1205 # manifest's if it hasn't been set already.
1206 ndset.setdefault(fnode, clnode)
1206 ndset.setdefault(fnode, clnode)
1207 else:
1207 else:
1208 # Otherwise we need a full manifest.
1208 # Otherwise we need a full manifest.
1209 m = mnfst.read(mnfstnode)
1209 m = mnfst.read(mnfstnode)
1210 # For every file in we care about.
1210 # For every file in we care about.
1211 for f in changedfiles:
1211 for f in changedfiles:
1212 fnode = m.get(f, None)
1212 fnode = m.get(f, None)
1213 # If it's in the manifest
1213 # If it's in the manifest
1214 if fnode is not None:
1214 if fnode is not None:
1215 # See comments above.
1215 # See comments above.
1216 clnode = msng_mnfst_set[mnfstnode]
1216 clnode = msng_mnfst_set[mnfstnode]
1217 ndset = msng_filenode_set.setdefault(f, {})
1217 ndset = msng_filenode_set.setdefault(f, {})
1218 ndset.setdefault(fnode, clnode)
1218 ndset.setdefault(fnode, clnode)
1219 # Remember the revision we hope to see next.
1219 # Remember the revision we hope to see next.
1220 next_rev[0] = r + 1
1220 next_rev[0] = r + 1
1221 return collect_msng_filenodes
1221 return collect_msng_filenodes
1222
1222
1223 # We have a list of filenodes we think we need for a file, lets remove
1223 # We have a list of filenodes we think we need for a file, lets remove
1224 # all those we now the recipient must have.
1224 # all those we now the recipient must have.
1225 def prune_filenodes(f, filerevlog):
1225 def prune_filenodes(f, filerevlog):
1226 msngset = msng_filenode_set[f]
1226 msngset = msng_filenode_set[f]
1227 hasset = {}
1227 hasset = {}
1228 # If a 'missing' filenode thinks it belongs to a changenode we
1228 # If a 'missing' filenode thinks it belongs to a changenode we
1229 # assume the recipient must have, then the recipient must have
1229 # assume the recipient must have, then the recipient must have
1230 # that filenode.
1230 # that filenode.
1231 for n in msngset:
1231 for n in msngset:
1232 clnode = cl.node(filerevlog.linkrev(n))
1232 clnode = cl.node(filerevlog.linkrev(n))
1233 if clnode in has_cl_set:
1233 if clnode in has_cl_set:
1234 hasset[n] = 1
1234 hasset[n] = 1
1235 prune_parents(filerevlog, hasset, msngset)
1235 prune_parents(filerevlog, hasset, msngset)
1236
1236
1237 # A function generator function that sets up the a context for the
1237 # A function generator function that sets up the a context for the
1238 # inner function.
1238 # inner function.
1239 def lookup_filenode_link_func(fname):
1239 def lookup_filenode_link_func(fname):
1240 msngset = msng_filenode_set[fname]
1240 msngset = msng_filenode_set[fname]
1241 # Lookup the changenode the filenode belongs to.
1241 # Lookup the changenode the filenode belongs to.
1242 def lookup_filenode_link(fnode):
1242 def lookup_filenode_link(fnode):
1243 return msngset[fnode]
1243 return msngset[fnode]
1244 return lookup_filenode_link
1244 return lookup_filenode_link
1245
1245
1246 # Now that we have all theses utility functions to help out and
1246 # Now that we have all theses utility functions to help out and
1247 # logically divide up the task, generate the group.
1247 # logically divide up the task, generate the group.
1248 def gengroup():
1248 def gengroup():
1249 # The set of changed files starts empty.
1249 # The set of changed files starts empty.
1250 changedfiles = {}
1250 changedfiles = {}
1251 # Create a changenode group generator that will call our functions
1251 # Create a changenode group generator that will call our functions
1252 # back to lookup the owning changenode and collect information.
1252 # back to lookup the owning changenode and collect information.
1253 group = cl.group(msng_cl_lst, identity,
1253 group = cl.group(msng_cl_lst, identity,
1254 manifest_and_file_collector(changedfiles))
1254 manifest_and_file_collector(changedfiles))
1255 for chnk in group:
1255 for chnk in group:
1256 yield chnk
1256 yield chnk
1257
1257
1258 # The list of manifests has been collected by the generator
1258 # The list of manifests has been collected by the generator
1259 # calling our functions back.
1259 # calling our functions back.
1260 prune_manifests()
1260 prune_manifests()
1261 msng_mnfst_lst = msng_mnfst_set.keys()
1261 msng_mnfst_lst = msng_mnfst_set.keys()
1262 # Sort the manifestnodes by revision number.
1262 # Sort the manifestnodes by revision number.
1263 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1263 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1264 # Create a generator for the manifestnodes that calls our lookup
1264 # Create a generator for the manifestnodes that calls our lookup
1265 # and data collection functions back.
1265 # and data collection functions back.
1266 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1266 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1267 filenode_collector(changedfiles))
1267 filenode_collector(changedfiles))
1268 for chnk in group:
1268 for chnk in group:
1269 yield chnk
1269 yield chnk
1270
1270
1271 # These are no longer needed, dereference and toss the memory for
1271 # These are no longer needed, dereference and toss the memory for
1272 # them.
1272 # them.
1273 msng_mnfst_lst = None
1273 msng_mnfst_lst = None
1274 msng_mnfst_set.clear()
1274 msng_mnfst_set.clear()
1275
1275
1276 changedfiles = changedfiles.keys()
1276 changedfiles = changedfiles.keys()
1277 changedfiles.sort()
1277 changedfiles.sort()
1278 # Go through all our files in order sorted by name.
1278 # Go through all our files in order sorted by name.
1279 for fname in changedfiles:
1279 for fname in changedfiles:
1280 filerevlog = self.file(fname)
1280 filerevlog = self.file(fname)
1281 # Toss out the filenodes that the recipient isn't really
1281 # Toss out the filenodes that the recipient isn't really
1282 # missing.
1282 # missing.
1283 if msng_filenode_set.has_key(fname):
1283 if msng_filenode_set.has_key(fname):
1284 prune_filenodes(fname, filerevlog)
1284 prune_filenodes(fname, filerevlog)
1285 msng_filenode_lst = msng_filenode_set[fname].keys()
1285 msng_filenode_lst = msng_filenode_set[fname].keys()
1286 else:
1286 else:
1287 msng_filenode_lst = []
1287 msng_filenode_lst = []
1288 # If any filenodes are left, generate the group for them,
1288 # If any filenodes are left, generate the group for them,
1289 # otherwise don't bother.
1289 # otherwise don't bother.
1290 if len(msng_filenode_lst) > 0:
1290 if len(msng_filenode_lst) > 0:
1291 yield changegroup.genchunk(fname)
1291 yield changegroup.genchunk(fname)
1292 # Sort the filenodes by their revision #
1292 # Sort the filenodes by their revision #
1293 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1293 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1294 # Create a group generator and only pass in a changenode
1294 # Create a group generator and only pass in a changenode
1295 # lookup function as we need to collect no information
1295 # lookup function as we need to collect no information
1296 # from filenodes.
1296 # from filenodes.
1297 group = filerevlog.group(msng_filenode_lst,
1297 group = filerevlog.group(msng_filenode_lst,
1298 lookup_filenode_link_func(fname))
1298 lookup_filenode_link_func(fname))
1299 for chnk in group:
1299 for chnk in group:
1300 yield chnk
1300 yield chnk
1301 if msng_filenode_set.has_key(fname):
1301 if msng_filenode_set.has_key(fname):
1302 # Don't need this anymore, toss it to free memory.
1302 # Don't need this anymore, toss it to free memory.
1303 del msng_filenode_set[fname]
1303 del msng_filenode_set[fname]
1304 # Signal that no more groups are left.
1304 # Signal that no more groups are left.
1305 yield changegroup.closechunk()
1305 yield changegroup.closechunk()
1306
1306
1307 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1307 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1308
1308
1309 return util.chunkbuffer(gengroup())
1309 return util.chunkbuffer(gengroup())
1310
1310
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than changegroupsubset as we can assume that
    the recipient has any changenode we aren't sending them.

    basenodes -- list of changenodes the recipient already has; everything
                 descended from (but not including) them is sent.
    source    -- opaque tag passed through to the pre/post hooks.

    Returns a util.chunkbuffer wrapping a generator that yields the raw
    changegroup chunks (changelog group, manifest group, then one group
    per changed file, terminated by a close chunk).
    """

    # Give hooks a chance to veto the operation before any work is done.
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Set of changelog revision numbers being sent; used to filter the
    # manifest/file revlogs down to entries linked to outgoing csets.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])

    def identity(x):
        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is the identity.
        return x

    def gennodelst(revlog):
        # Yield, in revision order, the nodes of `revlog` whose linkrev
        # points at one of the outgoing changesets.
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    def changed_file_collector(changedfileset):
        # Closure factory: returns a callback that records every file
        # touched by each outgoing changeset into `changedfileset`.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Closure factory: maps a node of `revlog` back to the changelog
        # node it was introduced by (via linkrev).
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        # Changelog group first; the collector callback fills in
        # `changedfiles` as a side effect of generating it.
        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest group.
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        # Finally one group per changed file, each preceded by a chunk
        # naming the file; files with no outgoing revisions are skipped.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # Signal that no more groups are left.
        yield changegroup.closechunk()
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1374
1374
def addchangegroup(self, source):
    """add changegroup to repo.
    returns number of heads modified or added + 1.

    source -- file-like object positioned at the start of a changegroup
              stream (changelog group, manifest group, then per-file
              groups, as produced by changegroup()/changegroupsubset()).

    Changelog and manifest data are staged through appendfile wrappers
    so concurrent readers never observe a half-written repository.
    """

    # NOTE: csmap/revmap close over `cl`, which is bound further down,
    # before the callbacks are ever invoked.
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True)

    changesets = files = revisions = 0

    tr = self.transaction()

    # write changelog and manifest data to temp files so
    # concurrent readers will not see inconsistent view
    cl = appendfile.appendchangelog(self.opener)

    oldheads = len(cl.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    co = cl.tip()
    chunkiter = changegroup.chunkiter(source)
    cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
    cnr, cor = map(cl.rev, (cn, co))
    if cn == nullid:
        # Empty group: nothing added, so new tip rev == old tip rev.
        cnr = cor
    changesets = cnr - cor

    mf = appendfile.appendmanifest(self.opener)

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    mm = mf.tip()
    chunkiter = changegroup.chunkiter(source)
    mo = mf.addgroup(chunkiter, revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while 1:
        # Each file section starts with a chunk holding the filename;
        # an empty chunk terminates the stream.
        f = changegroup.getchunk(source)
        if not f:
            break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()
        chunkiter = changegroup.chunkiter(source)
        n = fl.addgroup(chunkiter, revmap, tr)
        revisions += fl.count() - o
        files += 1

    # write order here is important so concurrent readers will see
    # consistent view of repo
    mf.writedata()
    cl.writedata()

    # make changelog and manifest see real files again
    self.changelog = changelog.changelog(self.opener)
    self.manifest = manifest.manifest(self.opener)

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads > oldheads:
        heads = _(" (+%d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    # cor+1 is the first newly-added changelog revision.
    self.hook('pretxnchangegroup', throw=True,
              node=hex(self.changelog.node(cor+1)))

    tr.close()

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

        for i in range(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)))

    return newheads - oldheads + 1
1463
1463
1464 def update(self, node, allow=False, force=False, choose=None,
1464 def update(self, node, allow=False, force=False, choose=None,
1465 moddirstate=True, forcemerge=False, wlock=None):
1465 moddirstate=True, forcemerge=False, wlock=None):
1466 pl = self.dirstate.parents()
1466 pl = self.dirstate.parents()
1467 if not force and pl[1] != nullid:
1467 if not force and pl[1] != nullid:
1468 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1468 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1469 return 1
1469 return 1
1470
1470
1471 err = False
1471 err = False
1472
1472
1473 p1, p2 = pl[0], node
1473 p1, p2 = pl[0], node
1474 pa = self.changelog.ancestor(p1, p2)
1474 pa = self.changelog.ancestor(p1, p2)
1475 m1n = self.changelog.read(p1)[0]
1475 m1n = self.changelog.read(p1)[0]
1476 m2n = self.changelog.read(p2)[0]
1476 m2n = self.changelog.read(p2)[0]
1477 man = self.manifest.ancestor(m1n, m2n)
1477 man = self.manifest.ancestor(m1n, m2n)
1478 m1 = self.manifest.read(m1n)
1478 m1 = self.manifest.read(m1n)
1479 mf1 = self.manifest.readflags(m1n)
1479 mf1 = self.manifest.readflags(m1n)
1480 m2 = self.manifest.read(m2n).copy()
1480 m2 = self.manifest.read(m2n).copy()
1481 mf2 = self.manifest.readflags(m2n)
1481 mf2 = self.manifest.readflags(m2n)
1482 ma = self.manifest.read(man)
1482 ma = self.manifest.read(man)
1483 mfa = self.manifest.readflags(man)
1483 mfa = self.manifest.readflags(man)
1484
1484
1485 modified, added, removed, deleted, unknown = self.changes()
1485 modified, added, removed, deleted, unknown = self.changes()
1486
1486
1487 # is this a jump, or a merge? i.e. is there a linear path
1487 # is this a jump, or a merge? i.e. is there a linear path
1488 # from p1 to p2?
1488 # from p1 to p2?
1489 linear_path = (pa == p1 or pa == p2)
1489 linear_path = (pa == p1 or pa == p2)
1490
1490
1491 if allow and linear_path:
1491 if allow and linear_path:
1492 raise util.Abort(_("there is nothing to merge, "
1492 raise util.Abort(_("there is nothing to merge, "
1493 "just use 'hg update'"))
1493 "just use 'hg update'"))
1494 if allow and not forcemerge:
1494 if allow and not forcemerge:
1495 if modified or added or removed:
1495 if modified or added or removed:
1496 raise util.Abort(_("outstanding uncommitted changes"))
1496 raise util.Abort(_("outstanding uncommitted changes"))
1497 if not forcemerge and not force:
1497 if not forcemerge and not force:
1498 for f in unknown:
1498 for f in unknown:
1499 if f in m2:
1499 if f in m2:
1500 t1 = self.wread(f)
1500 t1 = self.wread(f)
1501 t2 = self.file(f).read(m2[f])
1501 t2 = self.file(f).read(m2[f])
1502 if cmp(t1, t2) != 0:
1502 if cmp(t1, t2) != 0:
1503 raise util.Abort(_("'%s' already exists in the working"
1503 raise util.Abort(_("'%s' already exists in the working"
1504 " dir and differs from remote") % f)
1504 " dir and differs from remote") % f)
1505
1505
1506 # resolve the manifest to determine which files
1506 # resolve the manifest to determine which files
1507 # we care about merging
1507 # we care about merging
1508 self.ui.note(_("resolving manifests\n"))
1508 self.ui.note(_("resolving manifests\n"))
1509 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1509 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1510 (force, allow, moddirstate, linear_path))
1510 (force, allow, moddirstate, linear_path))
1511 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1511 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1512 (short(man), short(m1n), short(m2n)))
1512 (short(man), short(m1n), short(m2n)))
1513
1513
1514 merge = {}
1514 merge = {}
1515 get = {}
1515 get = {}
1516 remove = []
1516 remove = []
1517
1517
1518 # construct a working dir manifest
1518 # construct a working dir manifest
1519 mw = m1.copy()
1519 mw = m1.copy()
1520 mfw = mf1.copy()
1520 mfw = mf1.copy()
1521 umap = dict.fromkeys(unknown)
1521 umap = dict.fromkeys(unknown)
1522
1522
1523 for f in added + modified + unknown:
1523 for f in added + modified + unknown:
1524 mw[f] = ""
1524 mw[f] = ""
1525 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1525 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1526
1526
1527 if moddirstate and not wlock:
1527 if moddirstate and not wlock:
1528 wlock = self.wlock()
1528 wlock = self.wlock()
1529
1529
1530 for f in deleted + removed:
1530 for f in deleted + removed:
1531 if f in mw:
1531 if f in mw:
1532 del mw[f]
1532 del mw[f]
1533
1533
1534 # If we're jumping between revisions (as opposed to merging),
1534 # If we're jumping between revisions (as opposed to merging),
1535 # and if neither the working directory nor the target rev has
1535 # and if neither the working directory nor the target rev has
1536 # the file, then we need to remove it from the dirstate, to
1536 # the file, then we need to remove it from the dirstate, to
1537 # prevent the dirstate from listing the file when it is no
1537 # prevent the dirstate from listing the file when it is no
1538 # longer in the manifest.
1538 # longer in the manifest.
1539 if moddirstate and linear_path and f not in m2:
1539 if moddirstate and linear_path and f not in m2:
1540 self.dirstate.forget((f,))
1540 self.dirstate.forget((f,))
1541
1541
1542 # Compare manifests
1542 # Compare manifests
1543 for f, n in mw.iteritems():
1543 for f, n in mw.iteritems():
1544 if choose and not choose(f):
1544 if choose and not choose(f):
1545 continue
1545 continue
1546 if f in m2:
1546 if f in m2:
1547 s = 0
1547 s = 0
1548
1548
1549 # is the wfile new since m1, and match m2?
1549 # is the wfile new since m1, and match m2?
1550 if f not in m1:
1550 if f not in m1:
1551 t1 = self.wread(f)
1551 t1 = self.wread(f)
1552 t2 = self.file(f).read(m2[f])
1552 t2 = self.file(f).read(m2[f])
1553 if cmp(t1, t2) == 0:
1553 if cmp(t1, t2) == 0:
1554 n = m2[f]
1554 n = m2[f]
1555 del t1, t2
1555 del t1, t2
1556
1556
1557 # are files different?
1557 # are files different?
1558 if n != m2[f]:
1558 if n != m2[f]:
1559 a = ma.get(f, nullid)
1559 a = ma.get(f, nullid)
1560 # are both different from the ancestor?
1560 # are both different from the ancestor?
1561 if n != a and m2[f] != a:
1561 if n != a and m2[f] != a:
1562 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1562 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1563 # merge executable bits
1563 # merge executable bits
1564 # "if we changed or they changed, change in merge"
1564 # "if we changed or they changed, change in merge"
1565 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1565 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1566 mode = ((a^b) | (a^c)) ^ a
1566 mode = ((a^b) | (a^c)) ^ a
1567 merge[f] = (m1.get(f, nullid), m2[f], mode)
1567 merge[f] = (m1.get(f, nullid), m2[f], mode)
1568 s = 1
1568 s = 1
1569 # are we clobbering?
1569 # are we clobbering?
1570 # is remote's version newer?
1570 # is remote's version newer?
1571 # or are we going back in time?
1571 # or are we going back in time?
1572 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1572 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1573 self.ui.debug(_(" remote %s is newer, get\n") % f)
1573 self.ui.debug(_(" remote %s is newer, get\n") % f)
1574 get[f] = m2[f]
1574 get[f] = m2[f]
1575 s = 1
1575 s = 1
1576 elif f in umap:
1576 elif f in umap or f in added:
1577 # this unknown file is the same as the checkout
1577 # this unknown file is the same as the checkout
1578 # we need to reset the dirstate if the file was added
1578 get[f] = m2[f]
1579 get[f] = m2[f]
1579
1580
1580 if not s and mfw[f] != mf2[f]:
1581 if not s and mfw[f] != mf2[f]:
1581 if force:
1582 if force:
1582 self.ui.debug(_(" updating permissions for %s\n") % f)
1583 self.ui.debug(_(" updating permissions for %s\n") % f)
1583 util.set_exec(self.wjoin(f), mf2[f])
1584 util.set_exec(self.wjoin(f), mf2[f])
1584 else:
1585 else:
1585 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1586 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1586 mode = ((a^b) | (a^c)) ^ a
1587 mode = ((a^b) | (a^c)) ^ a
1587 if mode != b:
1588 if mode != b:
1588 self.ui.debug(_(" updating permissions for %s\n")
1589 self.ui.debug(_(" updating permissions for %s\n")
1589 % f)
1590 % f)
1590 util.set_exec(self.wjoin(f), mode)
1591 util.set_exec(self.wjoin(f), mode)
1591 del m2[f]
1592 del m2[f]
1592 elif f in ma:
1593 elif f in ma:
1593 if n != ma[f]:
1594 if n != ma[f]:
1594 r = _("d")
1595 r = _("d")
1595 if not force and (linear_path or allow):
1596 if not force and (linear_path or allow):
1596 r = self.ui.prompt(
1597 r = self.ui.prompt(
1597 (_(" local changed %s which remote deleted\n") % f) +
1598 (_(" local changed %s which remote deleted\n") % f) +
1598 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1599 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1599 if r == _("d"):
1600 if r == _("d"):
1600 remove.append(f)
1601 remove.append(f)
1601 else:
1602 else:
1602 self.ui.debug(_("other deleted %s\n") % f)
1603 self.ui.debug(_("other deleted %s\n") % f)
1603 remove.append(f) # other deleted it
1604 remove.append(f) # other deleted it
1604 else:
1605 else:
1605 # file is created on branch or in working directory
1606 # file is created on branch or in working directory
1606 if force and f not in umap:
1607 if force and f not in umap:
1607 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1608 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1608 remove.append(f)
1609 remove.append(f)
1609 elif n == m1.get(f, nullid): # same as parent
1610 elif n == m1.get(f, nullid): # same as parent
1610 if p2 == pa: # going backwards?
1611 if p2 == pa: # going backwards?
1611 self.ui.debug(_("remote deleted %s\n") % f)
1612 self.ui.debug(_("remote deleted %s\n") % f)
1612 remove.append(f)
1613 remove.append(f)
1613 else:
1614 else:
1614 self.ui.debug(_("local modified %s, keeping\n") % f)
1615 self.ui.debug(_("local modified %s, keeping\n") % f)
1615 else:
1616 else:
1616 self.ui.debug(_("working dir created %s, keeping\n") % f)
1617 self.ui.debug(_("working dir created %s, keeping\n") % f)
1617
1618
1618 for f, n in m2.iteritems():
1619 for f, n in m2.iteritems():
1619 if choose and not choose(f):
1620 if choose and not choose(f):
1620 continue
1621 continue
1621 if f[0] == "/":
1622 if f[0] == "/":
1622 continue
1623 continue
1623 if f in ma and n != ma[f]:
1624 if f in ma and n != ma[f]:
1624 r = _("k")
1625 r = _("k")
1625 if not force and (linear_path or allow):
1626 if not force and (linear_path or allow):
1626 r = self.ui.prompt(
1627 r = self.ui.prompt(
1627 (_("remote changed %s which local deleted\n") % f) +
1628 (_("remote changed %s which local deleted\n") % f) +
1628 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1629 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1629 if r == _("k"):
1630 if r == _("k"):
1630 get[f] = n
1631 get[f] = n
1631 elif f not in ma:
1632 elif f not in ma:
1632 self.ui.debug(_("remote created %s\n") % f)
1633 self.ui.debug(_("remote created %s\n") % f)
1633 get[f] = n
1634 get[f] = n
1634 else:
1635 else:
1635 if force or p2 == pa: # going backwards?
1636 if force or p2 == pa: # going backwards?
1636 self.ui.debug(_("local deleted %s, recreating\n") % f)
1637 self.ui.debug(_("local deleted %s, recreating\n") % f)
1637 get[f] = n
1638 get[f] = n
1638 else:
1639 else:
1639 self.ui.debug(_("local deleted %s\n") % f)
1640 self.ui.debug(_("local deleted %s\n") % f)
1640
1641
1641 del mw, m1, m2, ma
1642 del mw, m1, m2, ma
1642
1643
1643 if force:
1644 if force:
1644 for f in merge:
1645 for f in merge:
1645 get[f] = merge[f][1]
1646 get[f] = merge[f][1]
1646 merge = {}
1647 merge = {}
1647
1648
1648 if linear_path or force:
1649 if linear_path or force:
1649 # we don't need to do any magic, just jump to the new rev
1650 # we don't need to do any magic, just jump to the new rev
1650 branch_merge = False
1651 branch_merge = False
1651 p1, p2 = p2, nullid
1652 p1, p2 = p2, nullid
1652 else:
1653 else:
1653 if not allow:
1654 if not allow:
1654 self.ui.status(_("this update spans a branch"
1655 self.ui.status(_("this update spans a branch"
1655 " affecting the following files:\n"))
1656 " affecting the following files:\n"))
1656 fl = merge.keys() + get.keys()
1657 fl = merge.keys() + get.keys()
1657 fl.sort()
1658 fl.sort()
1658 for f in fl:
1659 for f in fl:
1659 cf = ""
1660 cf = ""
1660 if f in merge:
1661 if f in merge:
1661 cf = _(" (resolve)")
1662 cf = _(" (resolve)")
1662 self.ui.status(" %s%s\n" % (f, cf))
1663 self.ui.status(" %s%s\n" % (f, cf))
1663 self.ui.warn(_("aborting update spanning branches!\n"))
1664 self.ui.warn(_("aborting update spanning branches!\n"))
1664 self.ui.status(_("(use 'hg merge' to merge across branches"
1665 self.ui.status(_("(use 'hg merge' to merge across branches"
1665 " or 'hg update -C' to lose changes)\n"))
1666 " or 'hg update -C' to lose changes)\n"))
1666 return 1
1667 return 1
1667 branch_merge = True
1668 branch_merge = True
1668
1669
1669 # get the files we don't need to change
1670 # get the files we don't need to change
1670 files = get.keys()
1671 files = get.keys()
1671 files.sort()
1672 files.sort()
1672 for f in files:
1673 for f in files:
1673 if f[0] == "/":
1674 if f[0] == "/":
1674 continue
1675 continue
1675 self.ui.note(_("getting %s\n") % f)
1676 self.ui.note(_("getting %s\n") % f)
1676 t = self.file(f).read(get[f])
1677 t = self.file(f).read(get[f])
1677 self.wwrite(f, t)
1678 self.wwrite(f, t)
1678 util.set_exec(self.wjoin(f), mf2[f])
1679 util.set_exec(self.wjoin(f), mf2[f])
1679 if moddirstate:
1680 if moddirstate:
1680 if branch_merge:
1681 if branch_merge:
1681 self.dirstate.update([f], 'n', st_mtime=-1)
1682 self.dirstate.update([f], 'n', st_mtime=-1)
1682 else:
1683 else:
1683 self.dirstate.update([f], 'n')
1684 self.dirstate.update([f], 'n')
1684
1685
1685 # merge the tricky bits
1686 # merge the tricky bits
1686 failedmerge = []
1687 failedmerge = []
1687 files = merge.keys()
1688 files = merge.keys()
1688 files.sort()
1689 files.sort()
1689 xp1 = hex(p1)
1690 xp1 = hex(p1)
1690 xp2 = hex(p2)
1691 xp2 = hex(p2)
1691 for f in files:
1692 for f in files:
1692 self.ui.status(_("merging %s\n") % f)
1693 self.ui.status(_("merging %s\n") % f)
1693 my, other, flag = merge[f]
1694 my, other, flag = merge[f]
1694 ret = self.merge3(f, my, other, xp1, xp2)
1695 ret = self.merge3(f, my, other, xp1, xp2)
1695 if ret:
1696 if ret:
1696 err = True
1697 err = True
1697 failedmerge.append(f)
1698 failedmerge.append(f)
1698 util.set_exec(self.wjoin(f), flag)
1699 util.set_exec(self.wjoin(f), flag)
1699 if moddirstate:
1700 if moddirstate:
1700 if branch_merge:
1701 if branch_merge:
1701 # We've done a branch merge, mark this file as merged
1702 # We've done a branch merge, mark this file as merged
1702 # so that we properly record the merger later
1703 # so that we properly record the merger later
1703 self.dirstate.update([f], 'm')
1704 self.dirstate.update([f], 'm')
1704 else:
1705 else:
1705 # We've update-merged a locally modified file, so
1706 # We've update-merged a locally modified file, so
1706 # we set the dirstate to emulate a normal checkout
1707 # we set the dirstate to emulate a normal checkout
1707 # of that file some time in the past. Thus our
1708 # of that file some time in the past. Thus our
1708 # merge will appear as a normal local file
1709 # merge will appear as a normal local file
1709 # modification.
1710 # modification.
1710 f_len = len(self.file(f).read(other))
1711 f_len = len(self.file(f).read(other))
1711 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1712 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1712
1713
1713 remove.sort()
1714 remove.sort()
1714 for f in remove:
1715 for f in remove:
1715 self.ui.note(_("removing %s\n") % f)
1716 self.ui.note(_("removing %s\n") % f)
1716 util.audit_path(f)
1717 util.audit_path(f)
1717 try:
1718 try:
1718 util.unlink(self.wjoin(f))
1719 util.unlink(self.wjoin(f))
1719 except OSError, inst:
1720 except OSError, inst:
1720 if inst.errno != errno.ENOENT:
1721 if inst.errno != errno.ENOENT:
1721 self.ui.warn(_("update failed to remove %s: %s!\n") %
1722 self.ui.warn(_("update failed to remove %s: %s!\n") %
1722 (f, inst.strerror))
1723 (f, inst.strerror))
1723 if moddirstate:
1724 if moddirstate:
1724 if branch_merge:
1725 if branch_merge:
1725 self.dirstate.update(remove, 'r')
1726 self.dirstate.update(remove, 'r')
1726 else:
1727 else:
1727 self.dirstate.forget(remove)
1728 self.dirstate.forget(remove)
1728
1729
1729 if moddirstate:
1730 if moddirstate:
1730 self.dirstate.setparents(p1, p2)
1731 self.dirstate.setparents(p1, p2)
1731
1732
1732 stat = ((len(get), _("updated")),
1733 stat = ((len(get), _("updated")),
1733 (len(merge) - len(failedmerge), _("merged")),
1734 (len(merge) - len(failedmerge), _("merged")),
1734 (len(remove), _("removed")),
1735 (len(remove), _("removed")),
1735 (len(failedmerge), _("unresolved")))
1736 (len(failedmerge), _("unresolved")))
1736 note = ", ".join([_("%d files %s") % s for s in stat])
1737 note = ", ".join([_("%d files %s") % s for s in stat])
1737 self.ui.note("%s\n" % note)
1738 self.ui.note("%s\n" % note)
1738 if moddirstate and branch_merge:
1739 if moddirstate and branch_merge:
1739 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1740 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1740
1741
1741 return err
1742 return err
1742
1743
1743 def merge3(self, fn, my, other, p1, p2):
1744 def merge3(self, fn, my, other, p1, p2):
1744 """perform a 3-way merge in the working directory"""
1745 """perform a 3-way merge in the working directory"""
1745
1746
1746 def temp(prefix, node):
1747 def temp(prefix, node):
1747 pre = "%s~%s." % (os.path.basename(fn), prefix)
1748 pre = "%s~%s." % (os.path.basename(fn), prefix)
1748 (fd, name) = tempfile.mkstemp("", pre)
1749 (fd, name) = tempfile.mkstemp("", pre)
1749 f = os.fdopen(fd, "wb")
1750 f = os.fdopen(fd, "wb")
1750 self.wwrite(fn, fl.read(node), f)
1751 self.wwrite(fn, fl.read(node), f)
1751 f.close()
1752 f.close()
1752 return name
1753 return name
1753
1754
1754 fl = self.file(fn)
1755 fl = self.file(fn)
1755 base = fl.ancestor(my, other)
1756 base = fl.ancestor(my, other)
1756 a = self.wjoin(fn)
1757 a = self.wjoin(fn)
1757 b = temp("base", base)
1758 b = temp("base", base)
1758 c = temp("other", other)
1759 c = temp("other", other)
1759
1760
1760 self.ui.note(_("resolving %s\n") % fn)
1761 self.ui.note(_("resolving %s\n") % fn)
1761 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1762 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1762 (fn, short(my), short(other), short(base)))
1763 (fn, short(my), short(other), short(base)))
1763
1764
1764 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1765 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1765 or "hgmerge")
1766 or "hgmerge")
1766 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1767 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1767 environ={'HG_FILE': fn,
1768 environ={'HG_FILE': fn,
1768 'HG_MY_NODE': p1,
1769 'HG_MY_NODE': p1,
1769 'HG_OTHER_NODE': p2,
1770 'HG_OTHER_NODE': p2,
1770 'HG_FILE_MY_NODE': hex(my),
1771 'HG_FILE_MY_NODE': hex(my),
1771 'HG_FILE_OTHER_NODE': hex(other),
1772 'HG_FILE_OTHER_NODE': hex(other),
1772 'HG_FILE_BASE_NODE': hex(base)})
1773 'HG_FILE_BASE_NODE': hex(base)})
1773 if r:
1774 if r:
1774 self.ui.warn(_("merging %s failed!\n") % fn)
1775 self.ui.warn(_("merging %s failed!\n") % fn)
1775
1776
1776 os.unlink(b)
1777 os.unlink(b)
1777 os.unlink(c)
1778 os.unlink(c)
1778 return r
1779 return r
1779
1780
1780 def verify(self):
1781 def verify(self):
1781 filelinkrevs = {}
1782 filelinkrevs = {}
1782 filenodes = {}
1783 filenodes = {}
1783 changesets = revisions = files = 0
1784 changesets = revisions = files = 0
1784 errors = [0]
1785 errors = [0]
1785 neededmanifests = {}
1786 neededmanifests = {}
1786
1787
1787 def err(msg):
1788 def err(msg):
1788 self.ui.warn(msg + "\n")
1789 self.ui.warn(msg + "\n")
1789 errors[0] += 1
1790 errors[0] += 1
1790
1791
1791 def checksize(obj, name):
1792 def checksize(obj, name):
1792 d = obj.checksize()
1793 d = obj.checksize()
1793 if d[0]:
1794 if d[0]:
1794 err(_("%s data length off by %d bytes") % (name, d[0]))
1795 err(_("%s data length off by %d bytes") % (name, d[0]))
1795 if d[1]:
1796 if d[1]:
1796 err(_("%s index contains %d extra bytes") % (name, d[1]))
1797 err(_("%s index contains %d extra bytes") % (name, d[1]))
1797
1798
1798 seen = {}
1799 seen = {}
1799 self.ui.status(_("checking changesets\n"))
1800 self.ui.status(_("checking changesets\n"))
1800 checksize(self.changelog, "changelog")
1801 checksize(self.changelog, "changelog")
1801
1802
1802 for i in range(self.changelog.count()):
1803 for i in range(self.changelog.count()):
1803 changesets += 1
1804 changesets += 1
1804 n = self.changelog.node(i)
1805 n = self.changelog.node(i)
1805 l = self.changelog.linkrev(n)
1806 l = self.changelog.linkrev(n)
1806 if l != i:
1807 if l != i:
1807 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1808 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1808 if n in seen:
1809 if n in seen:
1809 err(_("duplicate changeset at revision %d") % i)
1810 err(_("duplicate changeset at revision %d") % i)
1810 seen[n] = 1
1811 seen[n] = 1
1811
1812
1812 for p in self.changelog.parents(n):
1813 for p in self.changelog.parents(n):
1813 if p not in self.changelog.nodemap:
1814 if p not in self.changelog.nodemap:
1814 err(_("changeset %s has unknown parent %s") %
1815 err(_("changeset %s has unknown parent %s") %
1815 (short(n), short(p)))
1816 (short(n), short(p)))
1816 try:
1817 try:
1817 changes = self.changelog.read(n)
1818 changes = self.changelog.read(n)
1818 except KeyboardInterrupt:
1819 except KeyboardInterrupt:
1819 self.ui.warn(_("interrupted"))
1820 self.ui.warn(_("interrupted"))
1820 raise
1821 raise
1821 except Exception, inst:
1822 except Exception, inst:
1822 err(_("unpacking changeset %s: %s") % (short(n), inst))
1823 err(_("unpacking changeset %s: %s") % (short(n), inst))
1823 continue
1824 continue
1824
1825
1825 neededmanifests[changes[0]] = n
1826 neededmanifests[changes[0]] = n
1826
1827
1827 for f in changes[3]:
1828 for f in changes[3]:
1828 filelinkrevs.setdefault(f, []).append(i)
1829 filelinkrevs.setdefault(f, []).append(i)
1829
1830
1830 seen = {}
1831 seen = {}
1831 self.ui.status(_("checking manifests\n"))
1832 self.ui.status(_("checking manifests\n"))
1832 checksize(self.manifest, "manifest")
1833 checksize(self.manifest, "manifest")
1833
1834
1834 for i in range(self.manifest.count()):
1835 for i in range(self.manifest.count()):
1835 n = self.manifest.node(i)
1836 n = self.manifest.node(i)
1836 l = self.manifest.linkrev(n)
1837 l = self.manifest.linkrev(n)
1837
1838
1838 if l < 0 or l >= self.changelog.count():
1839 if l < 0 or l >= self.changelog.count():
1839 err(_("bad manifest link (%d) at revision %d") % (l, i))
1840 err(_("bad manifest link (%d) at revision %d") % (l, i))
1840
1841
1841 if n in neededmanifests:
1842 if n in neededmanifests:
1842 del neededmanifests[n]
1843 del neededmanifests[n]
1843
1844
1844 if n in seen:
1845 if n in seen:
1845 err(_("duplicate manifest at revision %d") % i)
1846 err(_("duplicate manifest at revision %d") % i)
1846
1847
1847 seen[n] = 1
1848 seen[n] = 1
1848
1849
1849 for p in self.manifest.parents(n):
1850 for p in self.manifest.parents(n):
1850 if p not in self.manifest.nodemap:
1851 if p not in self.manifest.nodemap:
1851 err(_("manifest %s has unknown parent %s") %
1852 err(_("manifest %s has unknown parent %s") %
1852 (short(n), short(p)))
1853 (short(n), short(p)))
1853
1854
1854 try:
1855 try:
1855 delta = mdiff.patchtext(self.manifest.delta(n))
1856 delta = mdiff.patchtext(self.manifest.delta(n))
1856 except KeyboardInterrupt:
1857 except KeyboardInterrupt:
1857 self.ui.warn(_("interrupted"))
1858 self.ui.warn(_("interrupted"))
1858 raise
1859 raise
1859 except Exception, inst:
1860 except Exception, inst:
1860 err(_("unpacking manifest %s: %s") % (short(n), inst))
1861 err(_("unpacking manifest %s: %s") % (short(n), inst))
1861 continue
1862 continue
1862
1863
1863 try:
1864 try:
1864 ff = [ l.split('\0') for l in delta.splitlines() ]
1865 ff = [ l.split('\0') for l in delta.splitlines() ]
1865 for f, fn in ff:
1866 for f, fn in ff:
1866 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1867 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1867 except (ValueError, TypeError), inst:
1868 except (ValueError, TypeError), inst:
1868 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1869 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1869
1870
1870 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1871 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1871
1872
1872 for m, c in neededmanifests.items():
1873 for m, c in neededmanifests.items():
1873 err(_("Changeset %s refers to unknown manifest %s") %
1874 err(_("Changeset %s refers to unknown manifest %s") %
1874 (short(m), short(c)))
1875 (short(m), short(c)))
1875 del neededmanifests
1876 del neededmanifests
1876
1877
1877 for f in filenodes:
1878 for f in filenodes:
1878 if f not in filelinkrevs:
1879 if f not in filelinkrevs:
1879 err(_("file %s in manifest but not in changesets") % f)
1880 err(_("file %s in manifest but not in changesets") % f)
1880
1881
1881 for f in filelinkrevs:
1882 for f in filelinkrevs:
1882 if f not in filenodes:
1883 if f not in filenodes:
1883 err(_("file %s in changeset but not in manifest") % f)
1884 err(_("file %s in changeset but not in manifest") % f)
1884
1885
1885 self.ui.status(_("checking files\n"))
1886 self.ui.status(_("checking files\n"))
1886 ff = filenodes.keys()
1887 ff = filenodes.keys()
1887 ff.sort()
1888 ff.sort()
1888 for f in ff:
1889 for f in ff:
1889 if f == "/dev/null":
1890 if f == "/dev/null":
1890 continue
1891 continue
1891 files += 1
1892 files += 1
1892 if not f:
1893 if not f:
1893 err(_("file without name in manifest %s") % short(n))
1894 err(_("file without name in manifest %s") % short(n))
1894 continue
1895 continue
1895 fl = self.file(f)
1896 fl = self.file(f)
1896 checksize(fl, f)
1897 checksize(fl, f)
1897
1898
1898 nodes = {nullid: 1}
1899 nodes = {nullid: 1}
1899 seen = {}
1900 seen = {}
1900 for i in range(fl.count()):
1901 for i in range(fl.count()):
1901 revisions += 1
1902 revisions += 1
1902 n = fl.node(i)
1903 n = fl.node(i)
1903
1904
1904 if n in seen:
1905 if n in seen:
1905 err(_("%s: duplicate revision %d") % (f, i))
1906 err(_("%s: duplicate revision %d") % (f, i))
1906 if n not in filenodes[f]:
1907 if n not in filenodes[f]:
1907 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1908 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1908 else:
1909 else:
1909 del filenodes[f][n]
1910 del filenodes[f][n]
1910
1911
1911 flr = fl.linkrev(n)
1912 flr = fl.linkrev(n)
1912 if flr not in filelinkrevs.get(f, []):
1913 if flr not in filelinkrevs.get(f, []):
1913 err(_("%s:%s points to unexpected changeset %d")
1914 err(_("%s:%s points to unexpected changeset %d")
1914 % (f, short(n), flr))
1915 % (f, short(n), flr))
1915 else:
1916 else:
1916 filelinkrevs[f].remove(flr)
1917 filelinkrevs[f].remove(flr)
1917
1918
1918 # verify contents
1919 # verify contents
1919 try:
1920 try:
1920 t = fl.read(n)
1921 t = fl.read(n)
1921 except KeyboardInterrupt:
1922 except KeyboardInterrupt:
1922 self.ui.warn(_("interrupted"))
1923 self.ui.warn(_("interrupted"))
1923 raise
1924 raise
1924 except Exception, inst:
1925 except Exception, inst:
1925 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1926 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1926
1927
1927 # verify parents
1928 # verify parents
1928 (p1, p2) = fl.parents(n)
1929 (p1, p2) = fl.parents(n)
1929 if p1 not in nodes:
1930 if p1 not in nodes:
1930 err(_("file %s:%s unknown parent 1 %s") %
1931 err(_("file %s:%s unknown parent 1 %s") %
1931 (f, short(n), short(p1)))
1932 (f, short(n), short(p1)))
1932 if p2 not in nodes:
1933 if p2 not in nodes:
1933 err(_("file %s:%s unknown parent 2 %s") %
1934 err(_("file %s:%s unknown parent 2 %s") %
1934 (f, short(n), short(p1)))
1935 (f, short(n), short(p1)))
1935 nodes[n] = 1
1936 nodes[n] = 1
1936
1937
1937 # cross-check
1938 # cross-check
1938 for node in filenodes[f]:
1939 for node in filenodes[f]:
1939 err(_("node %s in manifests not in %s") % (hex(node), f))
1940 err(_("node %s in manifests not in %s") % (hex(node), f))
1940
1941
1941 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1942 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1942 (files, changesets, revisions))
1943 (files, changesets, revisions))
1943
1944
1944 if errors[0]:
1945 if errors[0]:
1945 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1946 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1946 return 1
1947 return 1
1947
1948
1948 # used to avoid circular references so destructors work
1949 # used to avoid circular references so destructors work
1949 def aftertrans(base):
1950 def aftertrans(base):
1950 p = base
1951 p = base
1951 def a():
1952 def a():
1952 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1953 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1953 util.rename(os.path.join(p, "journal.dirstate"),
1954 util.rename(os.path.join(p, "journal.dirstate"),
1954 os.path.join(p, "undo.dirstate"))
1955 os.path.join(p, "undo.dirstate"))
1955 return a
1956 return a
1956
1957
@@ -1,57 +1,69 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 set -e
3 set -e
4 mkdir r1
4 mkdir r1
5 cd r1
5 cd r1
6 hg init
6 hg init
7 echo a > a
7 echo a > a
8 hg addremove
8 hg addremove
9 hg commit -m "1" -d "1000000 0"
9 hg commit -m "1" -d "1000000 0"
10
10
11 hg clone . ../r2
11 hg clone . ../r2
12 cd ../r2
12 cd ../r2
13 hg up
13 hg up
14 echo abc > a
14 echo abc > a
15 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
15 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
16 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
16 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
17
17
18 cd ../r1
18 cd ../r1
19 echo b > b
19 echo b > b
20 echo a2 > a
20 echo a2 > a
21 hg addremove
21 hg addremove
22 hg commit -m "2" -d "1000000 0"
22 hg commit -m "2" -d "1000000 0"
23
23
24 cd ../r2
24 cd ../r2
25 hg -q pull ../r1
25 hg -q pull ../r1
26 hg status
26 hg status
27 hg parents
27 hg parents
28 hg --debug up
28 hg --debug up
29 hg parents
29 hg parents
30 hg --debug up 0
30 hg --debug up 0
31 hg parents
31 hg parents
32 hg --debug up -m || echo failed
32 hg --debug up -m || echo failed
33 hg parents
33 hg parents
34 hg --debug up
34 hg --debug up
35 hg parents
35 hg parents
36 hg -v history
36 hg -v history
37 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
37 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
38 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
38 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
39
39
40 # create a second head
40 # create a second head
41 cd ../r1
41 cd ../r1
42 hg up 0
42 hg up 0
43 echo b2 > b
43 echo b2 > b
44 echo a3 > a
44 echo a3 > a
45 hg addremove
45 hg addremove
46 hg commit -m "3" -d "1000000 0"
46 hg commit -m "3" -d "1000000 0"
47
47
48 cd ../r2
48 cd ../r2
49 hg -q pull ../r1
49 hg -q pull ../r1
50 hg status
50 hg status
51 hg parents
51 hg parents
52 hg --debug up || echo failed
52 hg --debug up || echo failed
53 hg --debug up -m || echo failed
53 hg --debug up -m || echo failed
54 hg --debug up -f -m
54 hg --debug up -f -m
55 hg parents
55 hg parents
56 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
56 hg diff | sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
57 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
57 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/"
58
59 # test a local add
60 cd ..
61 hg init a
62 hg init b
63 echo a > a/a
64 echo a > b/a
65 hg --cwd a commit -A -m a
66 cd b
67 hg add a
68 hg pull -u ../a
69 hg st
@@ -1,138 +1,145 b''
1 adding a
1 adding a
2 diff -r 33aaa84a386b a
2 diff -r 33aaa84a386b a
3 --- a/a
3 --- a/a
4 +++ b/a
4 +++ b/a
5 @@ -1,1 +1,1 @@ a
5 @@ -1,1 +1,1 @@ a
6 -a
6 -a
7 +abc
7 +abc
8 adding b
8 adding b
9 M a
9 M a
10 changeset: 0:33aaa84a386b
10 changeset: 0:33aaa84a386b
11 user: test
11 user: test
12 date: Mon Jan 12 13:46:40 1970 +0000
12 date: Mon Jan 12 13:46:40 1970 +0000
13 summary: 1
13 summary: 1
14
14
15 resolving manifests
15 resolving manifests
16 force None allow None moddirstate True linear True
16 force None allow None moddirstate True linear True
17 ancestor a0c8bcbbb45c local a0c8bcbbb45c remote 1165e8bd193e
17 ancestor a0c8bcbbb45c local a0c8bcbbb45c remote 1165e8bd193e
18 a versions differ, resolve
18 a versions differ, resolve
19 remote created b
19 remote created b
20 getting b
20 getting b
21 merging a
21 merging a
22 resolving a
22 resolving a
23 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
23 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
24 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
25 changeset: 1:802f095af299
25 changeset: 1:802f095af299
26 tag: tip
26 tag: tip
27 user: test
27 user: test
28 date: Mon Jan 12 13:46:40 1970 +0000
28 date: Mon Jan 12 13:46:40 1970 +0000
29 summary: 2
29 summary: 2
30
30
31 resolving manifests
31 resolving manifests
32 force None allow None moddirstate True linear True
32 force None allow None moddirstate True linear True
33 ancestor a0c8bcbbb45c local 1165e8bd193e remote a0c8bcbbb45c
33 ancestor a0c8bcbbb45c local 1165e8bd193e remote a0c8bcbbb45c
34 remote deleted b
34 remote deleted b
35 removing b
35 removing b
36 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
36 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
37 changeset: 0:33aaa84a386b
37 changeset: 0:33aaa84a386b
38 user: test
38 user: test
39 date: Mon Jan 12 13:46:40 1970 +0000
39 date: Mon Jan 12 13:46:40 1970 +0000
40 summary: 1
40 summary: 1
41
41
42 abort: there is nothing to merge, just use 'hg update'
42 abort: there is nothing to merge, just use 'hg update'
43 failed
43 failed
44 changeset: 0:33aaa84a386b
44 changeset: 0:33aaa84a386b
45 user: test
45 user: test
46 date: Mon Jan 12 13:46:40 1970 +0000
46 date: Mon Jan 12 13:46:40 1970 +0000
47 summary: 1
47 summary: 1
48
48
49 resolving manifests
49 resolving manifests
50 force None allow None moddirstate True linear True
50 force None allow None moddirstate True linear True
51 ancestor a0c8bcbbb45c local a0c8bcbbb45c remote 1165e8bd193e
51 ancestor a0c8bcbbb45c local a0c8bcbbb45c remote 1165e8bd193e
52 a versions differ, resolve
52 a versions differ, resolve
53 remote created b
53 remote created b
54 getting b
54 getting b
55 merging a
55 merging a
56 resolving a
56 resolving a
57 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
57 file a: my b789fdd96dc2 other d730145abbf9 ancestor b789fdd96dc2
58 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
58 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
59 changeset: 1:802f095af299
59 changeset: 1:802f095af299
60 tag: tip
60 tag: tip
61 user: test
61 user: test
62 date: Mon Jan 12 13:46:40 1970 +0000
62 date: Mon Jan 12 13:46:40 1970 +0000
63 summary: 2
63 summary: 2
64
64
65 changeset: 1:802f095af299cde27a85b2f056aef3829870956c
65 changeset: 1:802f095af299cde27a85b2f056aef3829870956c
66 tag: tip
66 tag: tip
67 user: test
67 user: test
68 date: Mon Jan 12 13:46:40 1970 +0000
68 date: Mon Jan 12 13:46:40 1970 +0000
69 files: a b
69 files: a b
70 description:
70 description:
71 2
71 2
72
72
73
73
74 changeset: 0:33aaa84a386bd609094aeb21a97c09436c482ef1
74 changeset: 0:33aaa84a386bd609094aeb21a97c09436c482ef1
75 user: test
75 user: test
76 date: Mon Jan 12 13:46:40 1970 +0000
76 date: Mon Jan 12 13:46:40 1970 +0000
77 files: a
77 files: a
78 description:
78 description:
79 1
79 1
80
80
81
81
82 diff -r 802f095af299 a
82 diff -r 802f095af299 a
83 --- a/a
83 --- a/a
84 +++ b/a
84 +++ b/a
85 @@ -1,1 +1,1 @@ a2
85 @@ -1,1 +1,1 @@ a2
86 -a2
86 -a2
87 +abc
87 +abc
88 adding b
88 adding b
89 M a
89 M a
90 changeset: 1:802f095af299
90 changeset: 1:802f095af299
91 user: test
91 user: test
92 date: Mon Jan 12 13:46:40 1970 +0000
92 date: Mon Jan 12 13:46:40 1970 +0000
93 summary: 2
93 summary: 2
94
94
95 resolving manifests
95 resolving manifests
96 force None allow None moddirstate True linear False
96 force None allow None moddirstate True linear False
97 ancestor a0c8bcbbb45c local 1165e8bd193e remote 4096f2872392
97 ancestor a0c8bcbbb45c local 1165e8bd193e remote 4096f2872392
98 a versions differ, resolve
98 a versions differ, resolve
99 b versions differ, resolve
99 b versions differ, resolve
100 this update spans a branch affecting the following files:
100 this update spans a branch affecting the following files:
101 a (resolve)
101 a (resolve)
102 b (resolve)
102 b (resolve)
103 aborting update spanning branches!
103 aborting update spanning branches!
104 (use 'hg merge' to merge across branches or 'hg update -C' to lose changes)
104 (use 'hg merge' to merge across branches or 'hg update -C' to lose changes)
105 failed
105 failed
106 abort: outstanding uncommitted changes
106 abort: outstanding uncommitted changes
107 failed
107 failed
108 resolving manifests
108 resolving manifests
109 force None allow 1 moddirstate True linear False
109 force None allow 1 moddirstate True linear False
110 ancestor a0c8bcbbb45c local 1165e8bd193e remote 4096f2872392
110 ancestor a0c8bcbbb45c local 1165e8bd193e remote 4096f2872392
111 a versions differ, resolve
111 a versions differ, resolve
112 b versions differ, resolve
112 b versions differ, resolve
113 merging a
113 merging a
114 resolving a
114 resolving a
115 file a: my d730145abbf9 other 13e0d5f949fa ancestor b789fdd96dc2
115 file a: my d730145abbf9 other 13e0d5f949fa ancestor b789fdd96dc2
116 merging b
116 merging b
117 resolving b
117 resolving b
118 file b: my 1e88685f5dde other 61de8c7723ca ancestor 000000000000
118 file b: my 1e88685f5dde other 61de8c7723ca ancestor 000000000000
119 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
119 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
120 (branch merge, don't forget to commit)
120 (branch merge, don't forget to commit)
121 changeset: 1:802f095af299
121 changeset: 1:802f095af299
122 user: test
122 user: test
123 date: Mon Jan 12 13:46:40 1970 +0000
123 date: Mon Jan 12 13:46:40 1970 +0000
124 summary: 2
124 summary: 2
125
125
126 changeset: 2:030602aee63d
126 changeset: 2:030602aee63d
127 tag: tip
127 tag: tip
128 parent: 0:33aaa84a386b
128 parent: 0:33aaa84a386b
129 user: test
129 user: test
130 date: Mon Jan 12 13:46:40 1970 +0000
130 date: Mon Jan 12 13:46:40 1970 +0000
131 summary: 3
131 summary: 3
132
132
133 diff -r 802f095af299 a
133 diff -r 802f095af299 a
134 --- a/a
134 --- a/a
135 +++ b/a
135 +++ b/a
136 @@ -1,1 +1,1 @@ a2
136 @@ -1,1 +1,1 @@ a2
137 -a2
137 -a2
138 +abc
138 +abc
139 adding a
140 pulling from ../a
141 requesting all changes
142 adding changesets
143 adding manifests
144 adding file changes
145 added 1 changesets with 1 changes to 1 files
General Comments 0
You need to be logged in to leave comments. Login now