##// END OF EJS Templates
Fix hg push and hg push -r sometimes creating new heads without --force.
Thomas Arendsen Hein -
r2021:fc22ed56 default
parent child Browse files
Show More
@@ -1,1923 +1,1949 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "appendfile changegroup")
15
15
16 class localrepository(object):
16 class localrepository(object):
    def __del__(self):
        # Drop the reference to any pending transaction handle when the
        # repository object is garbage-collected.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) a local repository.

        If no path is given, walk upward from the current directory
        looking for one that contains a ".hg" subdirectory.
        Raises repo.RepoError if no repository can be found/opened.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # dirname() reached a fixed point: filesystem root,
                    # and still no .hg found
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes files inside .hg; wopener inside the
        # working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-populated caches -- see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None
        # lazily-built [encode]/[decode] filter lists -- see wread()/wwrite()
        self.encodepats = None
        self.decodepats = None
        # currently running transaction, if any -- see transaction()
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc file is perfectly valid
            pass
55
55
    def hook(self, name, throw=False, **args):
        """Run all configured [hooks] entries matching `name`.

        A hook entry "name" or "name.suffix" matches.  Each keyword
        argument is exported into the hook's environment twice: as
        HG_<KEY> and as <KEY>.  Returns True only if every hook
        succeeded; with throw=True a failing hook raises util.Abort
        instead.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                # non-zero exit status: explain it and fail the hook
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            # run every hook even if an earlier one failed; the final
            # result is the AND of all hook results
            r = runhook(hname, cmd) and r
        return r
77
77
78 def tags(self):
78 def tags(self):
79 '''return a mapping of tag to node'''
79 '''return a mapping of tag to node'''
80 if not self.tagscache:
80 if not self.tagscache:
81 self.tagscache = {}
81 self.tagscache = {}
82
82
83 def parsetag(line, context):
83 def parsetag(line, context):
84 if not line:
84 if not line:
85 return
85 return
86 s = l.split(" ", 1)
86 s = l.split(" ", 1)
87 if len(s) != 2:
87 if len(s) != 2:
88 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
88 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
89 return
89 return
90 node, key = s
90 node, key = s
91 try:
91 try:
92 bin_n = bin(node)
92 bin_n = bin(node)
93 except TypeError:
93 except TypeError:
94 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
94 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
95 return
95 return
96 if bin_n not in self.changelog.nodemap:
96 if bin_n not in self.changelog.nodemap:
97 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
97 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
98 return
98 return
99 self.tagscache[key.strip()] = bin_n
99 self.tagscache[key.strip()] = bin_n
100
100
101 # read each head of the tags file, ending with the tip
101 # read each head of the tags file, ending with the tip
102 # and add each tag found to the map, with "newer" ones
102 # and add each tag found to the map, with "newer" ones
103 # taking precedence
103 # taking precedence
104 fl = self.file(".hgtags")
104 fl = self.file(".hgtags")
105 h = fl.heads()
105 h = fl.heads()
106 h.reverse()
106 h.reverse()
107 for r in h:
107 for r in h:
108 count = 0
108 count = 0
109 for l in fl.read(r).splitlines():
109 for l in fl.read(r).splitlines():
110 count += 1
110 count += 1
111 parsetag(l, ".hgtags:%d" % count)
111 parsetag(l, ".hgtags:%d" % count)
112
112
113 try:
113 try:
114 f = self.opener("localtags")
114 f = self.opener("localtags")
115 count = 0
115 count = 0
116 for l in f:
116 for l in f:
117 count += 1
117 count += 1
118 parsetag(l, "localtags:%d" % count)
118 parsetag(l, "localtags:%d" % count)
119 except IOError:
119 except IOError:
120 pass
120 pass
121
121
122 self.tagscache['tip'] = self.changelog.tip()
122 self.tagscache['tip'] = self.changelog.tip()
123
123
124 return self.tagscache
124 return self.tagscache
125
125
126 def tagslist(self):
126 def tagslist(self):
127 '''return a list of tags ordered by revision'''
127 '''return a list of tags ordered by revision'''
128 l = []
128 l = []
129 for t, n in self.tags().items():
129 for t, n in self.tags().items():
130 try:
130 try:
131 r = self.changelog.rev(n)
131 r = self.changelog.rev(n)
132 except:
132 except:
133 r = -2 # sort to the beginning of the list if unknown
133 r = -2 # sort to the beginning of the list if unknown
134 l.append((r, t, n))
134 l.append((r, t, n))
135 l.sort()
135 l.sort()
136 return [(t, n) for r, t, n in l]
136 return [(t, n) for r, t, n in l]
137
137
138 def nodetags(self, node):
138 def nodetags(self, node):
139 '''return the tags associated with a node'''
139 '''return the tags associated with a node'''
140 if not self.nodetagscache:
140 if not self.nodetagscache:
141 self.nodetagscache = {}
141 self.nodetagscache = {}
142 for t, n in self.tags().items():
142 for t, n in self.tags().items():
143 self.nodetagscache.setdefault(n, []).append(t)
143 self.nodetagscache.setdefault(n, []).append(t)
144 return self.nodetagscache.get(node, [])
144 return self.nodetagscache.get(node, [])
145
145
146 def lookup(self, key):
146 def lookup(self, key):
147 try:
147 try:
148 return self.tags()[key]
148 return self.tags()[key]
149 except KeyError:
149 except KeyError:
150 try:
150 try:
151 return self.changelog.lookup(key)
151 return self.changelog.lookup(key)
152 except:
152 except:
153 raise repo.RepoError(_("unknown revision '%s'") % key)
153 raise repo.RepoError(_("unknown revision '%s'") % key)
154
154
    def dev(self):
        # device number of the filesystem holding the .hg directory
        return os.stat(self.path).st_dev
157
157
    def local(self):
        # this repository is on local disk (as opposed to a remote one)
        return True
160
160
    def join(self, f):
        # path of f relative to the .hg directory
        return os.path.join(self.path, f)
163
163
    def wjoin(self, f):
        # path of f relative to the working directory root
        return os.path.join(self.root, f)
166
166
167 def file(self, f):
167 def file(self, f):
168 if f[0] == '/':
168 if f[0] == '/':
169 f = f[1:]
169 f = f[1:]
170 return filelog.filelog(self.opener, f)
170 return filelog.filelog(self.opener, f)
171
171
    def getcwd(self):
        # current directory as tracked by the dirstate
        return self.dirstate.getcwd()
174
174
    def wfile(self, f, mode='r'):
        # open file f from the working directory via wopener
        return self.wopener(f, mode)
177
177
178 def wread(self, filename):
178 def wread(self, filename):
179 if self.encodepats == None:
179 if self.encodepats == None:
180 l = []
180 l = []
181 for pat, cmd in self.ui.configitems("encode"):
181 for pat, cmd in self.ui.configitems("encode"):
182 mf = util.matcher(self.root, "", [pat], [], [])[1]
182 mf = util.matcher(self.root, "", [pat], [], [])[1]
183 l.append((mf, cmd))
183 l.append((mf, cmd))
184 self.encodepats = l
184 self.encodepats = l
185
185
186 data = self.wopener(filename, 'r').read()
186 data = self.wopener(filename, 'r').read()
187
187
188 for mf, cmd in self.encodepats:
188 for mf, cmd in self.encodepats:
189 if mf(filename):
189 if mf(filename):
190 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
190 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
191 data = util.filter(data, cmd)
191 data = util.filter(data, cmd)
192 break
192 break
193
193
194 return data
194 return data
195
195
196 def wwrite(self, filename, data, fd=None):
196 def wwrite(self, filename, data, fd=None):
197 if self.decodepats == None:
197 if self.decodepats == None:
198 l = []
198 l = []
199 for pat, cmd in self.ui.configitems("decode"):
199 for pat, cmd in self.ui.configitems("decode"):
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
200 mf = util.matcher(self.root, "", [pat], [], [])[1]
201 l.append((mf, cmd))
201 l.append((mf, cmd))
202 self.decodepats = l
202 self.decodepats = l
203
203
204 for mf, cmd in self.decodepats:
204 for mf, cmd in self.decodepats:
205 if mf(filename):
205 if mf(filename):
206 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
206 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
207 data = util.filter(data, cmd)
207 data = util.filter(data, cmd)
208 break
208 break
209
209
210 if fd:
210 if fd:
211 return fd.write(data)
211 return fd.write(data)
212 return self.wopener(filename, 'w').write(data)
212 return self.wopener(filename, 'w').write(data)
213
213
    def transaction(self):
        """Return a transaction handle; nest into a running one if present."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository): save it as empty
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        # remember the handle so later calls can nest into it
        self.transhandle = tr
        return tr
231
231
232 def recover(self):
232 def recover(self):
233 l = self.lock()
233 l = self.lock()
234 if os.path.exists(self.join("journal")):
234 if os.path.exists(self.join("journal")):
235 self.ui.status(_("rolling back interrupted transaction\n"))
235 self.ui.status(_("rolling back interrupted transaction\n"))
236 transaction.rollback(self.opener, self.join("journal"))
236 transaction.rollback(self.opener, self.join("journal"))
237 self.reload()
237 self.reload()
238 return True
238 return True
239 else:
239 else:
240 self.ui.warn(_("no interrupted transaction available\n"))
240 self.ui.warn(_("no interrupted transaction available\n"))
241 return False
241 return False
242
242
243 def undo(self, wlock=None):
243 def undo(self, wlock=None):
244 if not wlock:
244 if not wlock:
245 wlock = self.wlock()
245 wlock = self.wlock()
246 l = self.lock()
246 l = self.lock()
247 if os.path.exists(self.join("undo")):
247 if os.path.exists(self.join("undo")):
248 self.ui.status(_("rolling back last transaction\n"))
248 self.ui.status(_("rolling back last transaction\n"))
249 transaction.rollback(self.opener, self.join("undo"))
249 transaction.rollback(self.opener, self.join("undo"))
250 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
250 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
251 self.reload()
251 self.reload()
252 self.wreload()
252 self.wreload()
253 else:
253 else:
254 self.ui.warn(_("no undo information available\n"))
254 self.ui.warn(_("no undo information available\n"))
255
255
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
258
258
    def reload(self):
        # re-read changelog and manifest from disk and invalidate the
        # tag caches so they get rebuilt on next access
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
264
264
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file.

        First tries a non-blocking acquire; if the lock is held and
        `wait` is set, retries with a timeout (ui.timeout config,
        default 600 seconds).  Calls acquirefn once acquired and
        returns the lock object (the lock is released when it is
        garbage-collected or released explicitly).
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
281
281
    def lock(self, wait=1):
        # acquire the repository (store) lock; caches are reloaded on
        # acquisition via self.reload
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
285
285
    def wlock(self, wait=1):
        # acquire the working directory lock; the dirstate is written on
        # release and re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
290
290
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # Returns (existing_node, None, None) when the file content is
        # unchanged from a single parent (no new filelog entry needed),
        # otherwise (None, fp1, fp2): the parent filenodes to use when
        # adding a new revision.
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only the descendant
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
309
309
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files with explicit parents, bypassing the
        normal status computation (used e.g. by import/convert paths).

        The dirstate is only updated when p1 matches the current
        working directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing on top of the
        # current working directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged from a parent: reuse existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is not readable in the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
365
365
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit the given files (or all outstanding changes) as a new
        changeset.

        Returns the new changeset node, or None when there is nothing
        to commit or the user supplied an empty commit message.
        Runs the precommit, pretxncommit and commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify each file by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # merges (p2 != nullid) are committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its filenode in the metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # no message supplied: build a template and start the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # update the dirstate to reflect the new changeset
        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
485
485
486 def walk(self, node=None, files=[], match=util.always):
486 def walk(self, node=None, files=[], match=util.always):
487 if node:
487 if node:
488 fdict = dict.fromkeys(files)
488 fdict = dict.fromkeys(files)
489 for fn in self.manifest.read(self.changelog.read(node)[0]):
489 for fn in self.manifest.read(self.changelog.read(node)[0]):
490 fdict.pop(fn, None)
490 fdict.pop(fn, None)
491 if match(fn):
491 if match(fn):
492 yield 'm', fn
492 yield 'm', fn
493 for fn in fdict:
493 for fn in fdict:
494 self.ui.warn(_('%s: No such file in rev %s\n') % (
494 self.ui.warn(_('%s: No such file in rev %s\n') % (
495 util.pathto(self.getcwd(), fn), short(node)))
495 util.pathto(self.getcwd(), fn), short(node)))
496 else:
496 else:
497 for src, fn in self.dirstate.walk(files, match):
497 for src, fn in self.dirstate.walk(files, match):
498 yield src, fn
498 yield src, fn
499
499
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of file names.
        """

        def fcmp(fn, mf):
            # compare the working-dir contents of fn against the version
            # recorded in manifest mf; non-zero means they differ
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # try for the working-dir lock without blocking; without
                # it we simply skip writing dirstate fixups below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file is clean: record that while we hold the lock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
579
579
580 def add(self, list, wlock=None):
580 def add(self, list, wlock=None):
581 if not wlock:
581 if not wlock:
582 wlock = self.wlock()
582 wlock = self.wlock()
583 for f in list:
583 for f in list:
584 p = self.wjoin(f)
584 p = self.wjoin(f)
585 if not os.path.exists(p):
585 if not os.path.exists(p):
586 self.ui.warn(_("%s does not exist!\n") % f)
586 self.ui.warn(_("%s does not exist!\n") % f)
587 elif not os.path.isfile(p):
587 elif not os.path.isfile(p):
588 self.ui.warn(_("%s not added: only files supported currently\n")
588 self.ui.warn(_("%s not added: only files supported currently\n")
589 % f)
589 % f)
590 elif self.dirstate.state(f) in 'an':
590 elif self.dirstate.state(f) in 'an':
591 self.ui.warn(_("%s already tracked!\n") % f)
591 self.ui.warn(_("%s already tracked!\n") % f)
592 else:
592 else:
593 self.dirstate.update([f], "a")
593 self.dirstate.update([f], "a")
594
594
595 def forget(self, list, wlock=None):
595 def forget(self, list, wlock=None):
596 if not wlock:
596 if not wlock:
597 wlock = self.wlock()
597 wlock = self.wlock()
598 for f in list:
598 for f in list:
599 if self.dirstate.state(f) not in 'ai':
599 if self.dirstate.state(f) not in 'ai':
600 self.ui.warn(_("%s not added!\n") % f)
600 self.ui.warn(_("%s not added!\n") % f)
601 else:
601 else:
602 self.dirstate.forget([f])
602 self.dirstate.forget([f])
603
603
604 def remove(self, list, unlink=False, wlock=None):
604 def remove(self, list, unlink=False, wlock=None):
605 if unlink:
605 if unlink:
606 for f in list:
606 for f in list:
607 try:
607 try:
608 util.unlink(self.wjoin(f))
608 util.unlink(self.wjoin(f))
609 except OSError, inst:
609 except OSError, inst:
610 if inst.errno != errno.ENOENT:
610 if inst.errno != errno.ENOENT:
611 raise
611 raise
612 if not wlock:
612 if not wlock:
613 wlock = self.wlock()
613 wlock = self.wlock()
614 for f in list:
614 for f in list:
615 p = self.wjoin(f)
615 p = self.wjoin(f)
616 if os.path.exists(p):
616 if os.path.exists(p):
617 self.ui.warn(_("%s still exists!\n") % f)
617 self.ui.warn(_("%s still exists!\n") % f)
618 elif self.dirstate.state(f) == 'a':
618 elif self.dirstate.state(f) == 'a':
619 self.dirstate.forget([f])
619 self.dirstate.forget([f])
620 elif f not in self.dirstate:
620 elif f not in self.dirstate:
621 self.ui.warn(_("%s not tracked!\n") % f)
621 self.ui.warn(_("%s not tracked!\n") % f)
622 else:
622 else:
623 self.dirstate.update([f], "r")
623 self.dirstate.update([f], "r")
624
624
625 def undelete(self, list, wlock=None):
625 def undelete(self, list, wlock=None):
626 p = self.dirstate.parents()[0]
626 p = self.dirstate.parents()[0]
627 mn = self.changelog.read(p)[0]
627 mn = self.changelog.read(p)[0]
628 mf = self.manifest.readflags(mn)
628 mf = self.manifest.readflags(mn)
629 m = self.manifest.read(mn)
629 m = self.manifest.read(mn)
630 if not wlock:
630 if not wlock:
631 wlock = self.wlock()
631 wlock = self.wlock()
632 for f in list:
632 for f in list:
633 if self.dirstate.state(f) not in "r":
633 if self.dirstate.state(f) not in "r":
634 self.ui.warn("%s not removed!\n" % f)
634 self.ui.warn("%s not removed!\n" % f)
635 else:
635 else:
636 t = self.file(f).read(m[f])
636 t = self.file(f).read(m[f])
637 self.wwrite(f, t)
637 self.wwrite(f, t)
638 util.set_exec(self.wjoin(f), mf[f])
638 util.set_exec(self.wjoin(f), mf[f])
639 self.dirstate.update([f], "n")
639 self.dirstate.update([f], "n")
640
640
641 def copy(self, source, dest, wlock=None):
641 def copy(self, source, dest, wlock=None):
642 p = self.wjoin(dest)
642 p = self.wjoin(dest)
643 if not os.path.exists(p):
643 if not os.path.exists(p):
644 self.ui.warn(_("%s does not exist!\n") % dest)
644 self.ui.warn(_("%s does not exist!\n") % dest)
645 elif not os.path.isfile(p):
645 elif not os.path.isfile(p):
646 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
646 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
647 else:
647 else:
648 if not wlock:
648 if not wlock:
649 wlock = self.wlock()
649 wlock = self.wlock()
650 if self.dirstate.state(dest) == '?':
650 if self.dirstate.state(dest) == '?':
651 self.dirstate.update([dest], "a")
651 self.dirstate.update([dest], "a")
652 self.dirstate.copy(source, dest)
652 self.dirstate.copy(source, dest)
653
653
654 def heads(self, start=None):
654 def heads(self, start=None):
655 heads = self.changelog.heads(start)
655 heads = self.changelog.heads(start)
656 # sort the output in rev descending order
656 # sort the output in rev descending order
657 heads = [(-self.changelog.rev(h), h) for h in heads]
657 heads = [(-self.changelog.rev(h), h) for h in heads]
658 heads.sort()
658 heads.sort()
659 return [n for (r, n) in heads]
659 return [n for (r, n) in heads]
660
660
661 # branchlookup returns a dict giving a list of branches for
661 # branchlookup returns a dict giving a list of branches for
662 # each head. A branch is defined as the tag of a node or
662 # each head. A branch is defined as the tag of a node or
663 # the branch of the node's parents. If a node has multiple
663 # the branch of the node's parents. If a node has multiple
664 # branch tags, tags are eliminated if they are visible from other
664 # branch tags, tags are eliminated if they are visible from other
665 # branch tags.
665 # branch tags.
666 #
666 #
667 # So, for this graph: a->b->c->d->e
667 # So, for this graph: a->b->c->d->e
668 # \ /
668 # \ /
669 # aa -----/
669 # aa -----/
670 # a has tag 2.6.12
670 # a has tag 2.6.12
671 # d has tag 2.6.13
671 # d has tag 2.6.13
672 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
672 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
673 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
673 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
674 # from the list.
674 # from the list.
675 #
675 #
676 # It is possible that more than one head will have the same branch tag.
676 # It is possible that more than one head will have the same branch tag.
677 # callers need to check the result for multiple heads under the same
677 # callers need to check the result for multiple heads under the same
678 # branch tag if that is a problem for them (ie checkout of a specific
678 # branch tag if that is a problem for them (ie checkout of a specific
679 # branch).
679 # branch).
680 #
680 #
681 # passing in a specific branch will limit the depth of the search
681 # passing in a specific branch will limit the depth of the search
682 # through the parents. It won't limit the branches returned in the
682 # through the parents. It won't limit the branches returned in the
683 # result though.
683 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the branch tag names visible
        from it, with tags reachable from other visible tags eliminated.

        See the comment block above this method for the full semantics.
        'branch' limits how deep the parent traversal goes; 'heads'
        defaults to all repository heads.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred second-parent traversal, keeping the
                # 'found' tag list accumulated so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' is not a meaningful branch name
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                # defer merge second parents so each is walked only once
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of all branch-tag nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
766
766
767 def branches(self, nodes):
767 def branches(self, nodes):
768 if not nodes:
768 if not nodes:
769 nodes = [self.changelog.tip()]
769 nodes = [self.changelog.tip()]
770 b = []
770 b = []
771 for n in nodes:
771 for n in nodes:
772 t = n
772 t = n
773 while n:
773 while n:
774 p = self.changelog.parents(n)
774 p = self.changelog.parents(n)
775 if p[1] != nullid or p[0] == nullid:
775 if p[1] != nullid or p[0] == nullid:
776 b.append((t, n, p[0], p[1]))
776 b.append((t, n, p[0], p[1]))
777 break
777 break
778 n = p[0]
778 n = p[0]
779 return b
779 return b
780
780
781 def between(self, pairs):
781 def between(self, pairs):
782 r = []
782 r = []
783
783
784 for top, bottom in pairs:
784 for top, bottom in pairs:
785 n, l, i = top, [], 0
785 n, l, i = top, [], 0
786 f = 1
786 f = 1
787
787
788 while n != bottom:
788 while n != bottom:
789 p = self.changelog.parents(n)[0]
789 p = self.changelog.parents(n)[0]
790 if i == f:
790 if i == f:
791 l.append(n)
791 l.append(n)
792 f = f * 2
792 f = f * 2
793 n = p
793 n = p
794 i += 1
794 i += 1
795
795
796 r.append(l)
796 r.append(l)
797
797
798 return r
798 return r
799
799
800 def findincoming(self, remote, base=None, heads=None, force=False):
800 def findincoming(self, remote, base=None, heads=None, force=False):
801 m = self.changelog.nodemap
801 m = self.changelog.nodemap
802 search = []
802 search = []
803 fetch = {}
803 fetch = {}
804 seen = {}
804 seen = {}
805 seenbranch = {}
805 seenbranch = {}
806 if base == None:
806 if base == None:
807 base = {}
807 base = {}
808
808
809 # assume we're closer to the tip than the root
809 # assume we're closer to the tip than the root
810 # and start by examining the heads
810 # and start by examining the heads
811 self.ui.status(_("searching for changes\n"))
811 self.ui.status(_("searching for changes\n"))
812
812
813 if not heads:
813 if not heads:
814 heads = remote.heads()
814 heads = remote.heads()
815
815
816 unknown = []
816 unknown = []
817 for h in heads:
817 for h in heads:
818 if h not in m:
818 if h not in m:
819 unknown.append(h)
819 unknown.append(h)
820 else:
820 else:
821 base[h] = 1
821 base[h] = 1
822
822
823 if not unknown:
823 if not unknown:
824 return []
824 return []
825
825
826 rep = {}
826 rep = {}
827 reqcnt = 0
827 reqcnt = 0
828
828
829 # search through remote branches
829 # search through remote branches
830 # a 'branch' here is a linear segment of history, with four parts:
830 # a 'branch' here is a linear segment of history, with four parts:
831 # head, root, first parent, second parent
831 # head, root, first parent, second parent
832 # (a branch always has two parents (or none) by definition)
832 # (a branch always has two parents (or none) by definition)
833 unknown = remote.branches(unknown)
833 unknown = remote.branches(unknown)
834 while unknown:
834 while unknown:
835 r = []
835 r = []
836 while unknown:
836 while unknown:
837 n = unknown.pop(0)
837 n = unknown.pop(0)
838 if n[0] in seen:
838 if n[0] in seen:
839 continue
839 continue
840
840
841 self.ui.debug(_("examining %s:%s\n")
841 self.ui.debug(_("examining %s:%s\n")
842 % (short(n[0]), short(n[1])))
842 % (short(n[0]), short(n[1])))
843 if n[0] == nullid:
843 if n[0] == nullid:
844 break
844 break
845 if n in seenbranch:
845 if n in seenbranch:
846 self.ui.debug(_("branch already found\n"))
846 self.ui.debug(_("branch already found\n"))
847 continue
847 continue
848 if n[1] and n[1] in m: # do we know the base?
848 if n[1] and n[1] in m: # do we know the base?
849 self.ui.debug(_("found incomplete branch %s:%s\n")
849 self.ui.debug(_("found incomplete branch %s:%s\n")
850 % (short(n[0]), short(n[1])))
850 % (short(n[0]), short(n[1])))
851 search.append(n) # schedule branch range for scanning
851 search.append(n) # schedule branch range for scanning
852 seenbranch[n] = 1
852 seenbranch[n] = 1
853 else:
853 else:
854 if n[1] not in seen and n[1] not in fetch:
854 if n[1] not in seen and n[1] not in fetch:
855 if n[2] in m and n[3] in m:
855 if n[2] in m and n[3] in m:
856 self.ui.debug(_("found new changeset %s\n") %
856 self.ui.debug(_("found new changeset %s\n") %
857 short(n[1]))
857 short(n[1]))
858 fetch[n[1]] = 1 # earliest unknown
858 fetch[n[1]] = 1 # earliest unknown
859 base[n[2]] = 1 # latest known
859 base[n[2]] = 1 # latest known
860 continue
860 continue
861
861
862 for a in n[2:4]:
862 for a in n[2:4]:
863 if a not in rep:
863 if a not in rep:
864 r.append(a)
864 r.append(a)
865 rep[a] = 1
865 rep[a] = 1
866
866
867 seen[n[0]] = 1
867 seen[n[0]] = 1
868
868
869 if r:
869 if r:
870 reqcnt += 1
870 reqcnt += 1
871 self.ui.debug(_("request %d: %s\n") %
871 self.ui.debug(_("request %d: %s\n") %
872 (reqcnt, " ".join(map(short, r))))
872 (reqcnt, " ".join(map(short, r))))
873 for p in range(0, len(r), 10):
873 for p in range(0, len(r), 10):
874 for b in remote.branches(r[p:p+10]):
874 for b in remote.branches(r[p:p+10]):
875 self.ui.debug(_("received %s:%s\n") %
875 self.ui.debug(_("received %s:%s\n") %
876 (short(b[0]), short(b[1])))
876 (short(b[0]), short(b[1])))
877 if b[0] in m:
877 if b[0] in m:
878 self.ui.debug(_("found base node %s\n")
878 self.ui.debug(_("found base node %s\n")
879 % short(b[0]))
879 % short(b[0]))
880 base[b[0]] = 1
880 base[b[0]] = 1
881 elif b[0] not in seen:
881 elif b[0] not in seen:
882 unknown.append(b)
882 unknown.append(b)
883
883
884 # do binary search on the branches we found
884 # do binary search on the branches we found
885 while search:
885 while search:
886 n = search.pop(0)
886 n = search.pop(0)
887 reqcnt += 1
887 reqcnt += 1
888 l = remote.between([(n[0], n[1])])[0]
888 l = remote.between([(n[0], n[1])])[0]
889 l.append(n[1])
889 l.append(n[1])
890 p = n[0]
890 p = n[0]
891 f = 1
891 f = 1
892 for i in l:
892 for i in l:
893 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
893 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
894 if i in m:
894 if i in m:
895 if f <= 2:
895 if f <= 2:
896 self.ui.debug(_("found new branch changeset %s\n") %
896 self.ui.debug(_("found new branch changeset %s\n") %
897 short(p))
897 short(p))
898 fetch[p] = 1
898 fetch[p] = 1
899 base[i] = 1
899 base[i] = 1
900 else:
900 else:
901 self.ui.debug(_("narrowed branch search to %s:%s\n")
901 self.ui.debug(_("narrowed branch search to %s:%s\n")
902 % (short(p), short(i)))
902 % (short(p), short(i)))
903 search.append((p, i))
903 search.append((p, i))
904 break
904 break
905 p, f = i, f * 2
905 p, f = i, f * 2
906
906
907 # sanity check our fetch list
907 # sanity check our fetch list
908 for f in fetch.keys():
908 for f in fetch.keys():
909 if f in m:
909 if f in m:
910 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
910 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
911
911
912 if base.keys() == [nullid]:
912 if base.keys() == [nullid]:
913 if force:
913 if force:
914 self.ui.warn(_("warning: repository is unrelated\n"))
914 self.ui.warn(_("warning: repository is unrelated\n"))
915 else:
915 else:
916 raise util.Abort(_("repository is unrelated"))
916 raise util.Abort(_("repository is unrelated"))
917
917
918 self.ui.note(_("found new changesets starting at ") +
918 self.ui.note(_("found new changesets starting at ") +
919 " ".join([short(f) for f in fetch]) + "\n")
919 " ".join([short(f) for f in fetch]) + "\n")
920
920
921 self.ui.debug(_("%d total queries\n") % reqcnt)
921 self.ui.debug(_("%d total queries\n") % reqcnt)
922
922
923 return fetch.keys()
923 return fetch.keys()
924
924
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            # no common-node info supplied: run discovery to fill it in
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
954
972
955 def pull(self, remote, heads=None, force=False):
973 def pull(self, remote, heads=None, force=False):
956 l = self.lock()
974 l = self.lock()
957
975
958 # if we have an empty repo, fetch everything
976 # if we have an empty repo, fetch everything
959 if self.changelog.tip() == nullid:
977 if self.changelog.tip() == nullid:
960 self.ui.status(_("requesting all changes\n"))
978 self.ui.status(_("requesting all changes\n"))
961 fetch = [nullid]
979 fetch = [nullid]
962 else:
980 else:
963 fetch = self.findincoming(remote, force=force)
981 fetch = self.findincoming(remote, force=force)
964
982
965 if not fetch:
983 if not fetch:
966 self.ui.status(_("no changes found\n"))
984 self.ui.status(_("no changes found\n"))
967 return 0
985 return 0
968
986
969 if heads is None:
987 if heads is None:
970 cg = remote.changegroup(fetch, 'pull')
988 cg = remote.changegroup(fetch, 'pull')
971 else:
989 else:
972 cg = remote.changegroupsubset(fetch, heads, 'pull')
990 cg = remote.changegroupsubset(fetch, heads, 'pull')
973 return self.addchangegroup(cg)
991 return self.addchangegroup(cg)
974
992
    def push(self, remote, force=False, revs=None):
        """Push local changesets to remote.

        Without force, aborts (returning 1) when the remote has unsynced
        changes or when the push would create new remote heads. 'revs'
        limits the push to ancestors of those revisions. Returns
        remote.addchangegroup's result on success.
        """
        lock = remote.lock()

        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # The push would create a new remote head if fewer remote
            # heads get new children than we have heads to push: compare
            # against remote heads actually updated, not just our bases.
            if revs is not None:
                # recompute updated_heads from the changesets actually
                # being pushed, since -r may exclude some outgoing roots
                updated_heads = {}
                for base in msng_cl:
                    for parent in self.changelog.parents(base):
                        if parent in remote_heads:
                            updated_heads[parent] = True
                updated_heads = updated_heads.keys()
            if len(updated_heads) < len(heads):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)
1007
1033
1008 def changegroupsubset(self, bases, heads, source):
1034 def changegroupsubset(self, bases, heads, source):
1009 """This function generates a changegroup consisting of all the nodes
1035 """This function generates a changegroup consisting of all the nodes
1010 that are descendents of any of the bases, and ancestors of any of
1036 that are descendents of any of the bases, and ancestors of any of
1011 the heads.
1037 the heads.
1012
1038
1013 It is fairly complex as determining which filenodes and which
1039 It is fairly complex as determining which filenodes and which
1014 manifest nodes need to be included for the changeset to be complete
1040 manifest nodes need to be included for the changeset to be complete
1015 is non-trivial.
1041 is non-trivial.
1016
1042
1017 Another wrinkle is doing the reverse, figuring out which changeset in
1043 Another wrinkle is doing the reverse, figuring out which changeset in
1018 the changegroup a particular filenode or manifestnode belongs to."""
1044 the changegroup a particular filenode or manifestnode belongs to."""
1019
1045
1020 self.hook('preoutgoing', throw=True, source=source)
1046 self.hook('preoutgoing', throw=True, source=source)
1021
1047
1022 # Set up some initial variables
1048 # Set up some initial variables
1023 # Make it easy to refer to self.changelog
1049 # Make it easy to refer to self.changelog
1024 cl = self.changelog
1050 cl = self.changelog
1025 # msng is short for missing - compute the list of changesets in this
1051 # msng is short for missing - compute the list of changesets in this
1026 # changegroup.
1052 # changegroup.
1027 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1053 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1028 # Some bases may turn out to be superfluous, and some heads may be
1054 # Some bases may turn out to be superfluous, and some heads may be
1029 # too. nodesbetween will return the minimal set of bases and heads
1055 # too. nodesbetween will return the minimal set of bases and heads
1030 # necessary to re-create the changegroup.
1056 # necessary to re-create the changegroup.
1031
1057
1032 # Known heads are the list of heads that it is assumed the recipient
1058 # Known heads are the list of heads that it is assumed the recipient
1033 # of this changegroup will know about.
1059 # of this changegroup will know about.
1034 knownheads = {}
1060 knownheads = {}
1035 # We assume that all parents of bases are known heads.
1061 # We assume that all parents of bases are known heads.
1036 for n in bases:
1062 for n in bases:
1037 for p in cl.parents(n):
1063 for p in cl.parents(n):
1038 if p != nullid:
1064 if p != nullid:
1039 knownheads[p] = 1
1065 knownheads[p] = 1
1040 knownheads = knownheads.keys()
1066 knownheads = knownheads.keys()
1041 if knownheads:
1067 if knownheads:
1042 # Now that we know what heads are known, we can compute which
1068 # Now that we know what heads are known, we can compute which
1043 # changesets are known. The recipient must know about all
1069 # changesets are known. The recipient must know about all
1044 # changesets required to reach the known heads from the null
1070 # changesets required to reach the known heads from the null
1045 # changeset.
1071 # changeset.
1046 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1072 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1047 junk = None
1073 junk = None
1048 # Transform the list into an ersatz set.
1074 # Transform the list into an ersatz set.
1049 has_cl_set = dict.fromkeys(has_cl_set)
1075 has_cl_set = dict.fromkeys(has_cl_set)
1050 else:
1076 else:
1051 # If there were no known heads, the recipient cannot be assumed to
1077 # If there were no known heads, the recipient cannot be assumed to
1052 # know about any changesets.
1078 # know about any changesets.
1053 has_cl_set = {}
1079 has_cl_set = {}
1054
1080
1055 # Make it easy to refer to self.manifest
1081 # Make it easy to refer to self.manifest
1056 mnfst = self.manifest
1082 mnfst = self.manifest
1057 # We don't know which manifests are missing yet
1083 # We don't know which manifests are missing yet
1058 msng_mnfst_set = {}
1084 msng_mnfst_set = {}
1059 # Nor do we know which filenodes are missing.
1085 # Nor do we know which filenodes are missing.
1060 msng_filenode_set = {}
1086 msng_filenode_set = {}
1061
1087
1062 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1088 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1063 junk = None
1089 junk = None
1064
1090
1065 # A changeset always belongs to itself, so the changenode lookup
1091 # A changeset always belongs to itself, so the changenode lookup
1066 # function for a changenode is identity.
1092 # function for a changenode is identity.
1067 def identity(x):
1093 def identity(x):
1068 return x
1094 return x
1069
1095
1070 # A function generating function. Sets up an environment for the
1096 # A function generating function. Sets up an environment for the
1071 # inner function.
1097 # inner function.
1072 def cmp_by_rev_func(revlog):
1098 def cmp_by_rev_func(revlog):
1073 # Compare two nodes by their revision number in the environment's
1099 # Compare two nodes by their revision number in the environment's
1074 # revision history. Since the revision number both represents the
1100 # revision history. Since the revision number both represents the
1075 # most efficient order to read the nodes in, and represents a
1101 # most efficient order to read the nodes in, and represents a
1076 # topological sorting of the nodes, this function is often useful.
1102 # topological sorting of the nodes, this function is often useful.
1077 def cmp_by_rev(a, b):
1103 def cmp_by_rev(a, b):
1078 return cmp(revlog.rev(a), revlog.rev(b))
1104 return cmp(revlog.rev(a), revlog.rev(b))
1079 return cmp_by_rev
1105 return cmp_by_rev
1080
1106
1081 # If we determine that a particular file or manifest node must be a
1107 # If we determine that a particular file or manifest node must be a
1082 # node that the recipient of the changegroup will already have, we can
1108 # node that the recipient of the changegroup will already have, we can
1083 # also assume the recipient will have all the parents. This function
1109 # also assume the recipient will have all the parents. This function
1084 # prunes them from the set of missing nodes.
1110 # prunes them from the set of missing nodes.
1085 def prune_parents(revlog, hasset, msngset):
1111 def prune_parents(revlog, hasset, msngset):
1086 haslst = hasset.keys()
1112 haslst = hasset.keys()
1087 haslst.sort(cmp_by_rev_func(revlog))
1113 haslst.sort(cmp_by_rev_func(revlog))
1088 for node in haslst:
1114 for node in haslst:
1089 parentlst = [p for p in revlog.parents(node) if p != nullid]
1115 parentlst = [p for p in revlog.parents(node) if p != nullid]
1090 while parentlst:
1116 while parentlst:
1091 n = parentlst.pop()
1117 n = parentlst.pop()
1092 if n not in hasset:
1118 if n not in hasset:
1093 hasset[n] = 1
1119 hasset[n] = 1
1094 p = [p for p in revlog.parents(n) if p != nullid]
1120 p = [p for p in revlog.parents(n) if p != nullid]
1095 parentlst.extend(p)
1121 parentlst.extend(p)
1096 for n in hasset:
1122 for n in hasset:
1097 msngset.pop(n, None)
1123 msngset.pop(n, None)
1098
1124
1099 # This is a function generating function used to set up an environment
1125 # This is a function generating function used to set up an environment
1100 # for the inner function to execute in.
1126 # for the inner function to execute in.
1101 def manifest_and_file_collector(changedfileset):
1127 def manifest_and_file_collector(changedfileset):
1102 # This is an information gathering function that gathers
1128 # This is an information gathering function that gathers
1103 # information from each changeset node that goes out as part of
1129 # information from each changeset node that goes out as part of
1104 # the changegroup. The information gathered is a list of which
1130 # the changegroup. The information gathered is a list of which
1105 # manifest nodes are potentially required (the recipient may
1131 # manifest nodes are potentially required (the recipient may
1106 # already have them) and total list of all files which were
1132 # already have them) and total list of all files which were
1107 # changed in any changeset in the changegroup.
1133 # changed in any changeset in the changegroup.
1108 #
1134 #
1109 # We also remember the first changenode we saw any manifest
1135 # We also remember the first changenode we saw any manifest
1110 # referenced by so we can later determine which changenode 'owns'
1136 # referenced by so we can later determine which changenode 'owns'
1111 # the manifest.
1137 # the manifest.
1112 def collect_manifests_and_files(clnode):
1138 def collect_manifests_and_files(clnode):
1113 c = cl.read(clnode)
1139 c = cl.read(clnode)
1114 for f in c[3]:
1140 for f in c[3]:
1115 # This is to make sure we only have one instance of each
1141 # This is to make sure we only have one instance of each
1116 # filename string for each filename.
1142 # filename string for each filename.
1117 changedfileset.setdefault(f, f)
1143 changedfileset.setdefault(f, f)
1118 msng_mnfst_set.setdefault(c[0], clnode)
1144 msng_mnfst_set.setdefault(c[0], clnode)
1119 return collect_manifests_and_files
1145 return collect_manifests_and_files
1120
1146
1121 # Figure out which manifest nodes (of the ones we think might be part
1147 # Figure out which manifest nodes (of the ones we think might be part
1122 # of the changegroup) the recipient must know about and remove them
1148 # of the changegroup) the recipient must know about and remove them
1123 # from the changegroup.
1149 # from the changegroup.
1124 def prune_manifests():
1150 def prune_manifests():
1125 has_mnfst_set = {}
1151 has_mnfst_set = {}
1126 for n in msng_mnfst_set:
1152 for n in msng_mnfst_set:
1127 # If a 'missing' manifest thinks it belongs to a changenode
1153 # If a 'missing' manifest thinks it belongs to a changenode
1128 # the recipient is assumed to have, obviously the recipient
1154 # the recipient is assumed to have, obviously the recipient
1129 # must have that manifest.
1155 # must have that manifest.
1130 linknode = cl.node(mnfst.linkrev(n))
1156 linknode = cl.node(mnfst.linkrev(n))
1131 if linknode in has_cl_set:
1157 if linknode in has_cl_set:
1132 has_mnfst_set[n] = 1
1158 has_mnfst_set[n] = 1
1133 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1159 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1134
1160
1135 # Use the information collected in collect_manifests_and_files to say
1161 # Use the information collected in collect_manifests_and_files to say
1136 # which changenode any manifestnode belongs to.
1162 # which changenode any manifestnode belongs to.
1137 def lookup_manifest_link(mnfstnode):
1163 def lookup_manifest_link(mnfstnode):
1138 return msng_mnfst_set[mnfstnode]
1164 return msng_mnfst_set[mnfstnode]
1139
1165
1140 # A function generating function that sets up the initial environment
1166 # A function generating function that sets up the initial environment
1141 # the inner function.
1167 # the inner function.
1142 def filenode_collector(changedfiles):
1168 def filenode_collector(changedfiles):
1143 next_rev = [0]
1169 next_rev = [0]
1144 # This gathers information from each manifestnode included in the
1170 # This gathers information from each manifestnode included in the
1145 # changegroup about which filenodes the manifest node references
1171 # changegroup about which filenodes the manifest node references
1146 # so we can include those in the changegroup too.
1172 # so we can include those in the changegroup too.
1147 #
1173 #
1148 # It also remembers which changenode each filenode belongs to. It
1174 # It also remembers which changenode each filenode belongs to. It
1149 # does this by assuming the a filenode belongs to the changenode
1175 # does this by assuming the a filenode belongs to the changenode
1150 # the first manifest that references it belongs to.
1176 # the first manifest that references it belongs to.
1151 def collect_msng_filenodes(mnfstnode):
1177 def collect_msng_filenodes(mnfstnode):
1152 r = mnfst.rev(mnfstnode)
1178 r = mnfst.rev(mnfstnode)
1153 if r == next_rev[0]:
1179 if r == next_rev[0]:
1154 # If the last rev we looked at was the one just previous,
1180 # If the last rev we looked at was the one just previous,
1155 # we only need to see a diff.
1181 # we only need to see a diff.
1156 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1182 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1157 # For each line in the delta
1183 # For each line in the delta
1158 for dline in delta.splitlines():
1184 for dline in delta.splitlines():
1159 # get the filename and filenode for that line
1185 # get the filename and filenode for that line
1160 f, fnode = dline.split('\0')
1186 f, fnode = dline.split('\0')
1161 fnode = bin(fnode[:40])
1187 fnode = bin(fnode[:40])
1162 f = changedfiles.get(f, None)
1188 f = changedfiles.get(f, None)
1163 # And if the file is in the list of files we care
1189 # And if the file is in the list of files we care
1164 # about.
1190 # about.
1165 if f is not None:
1191 if f is not None:
1166 # Get the changenode this manifest belongs to
1192 # Get the changenode this manifest belongs to
1167 clnode = msng_mnfst_set[mnfstnode]
1193 clnode = msng_mnfst_set[mnfstnode]
1168 # Create the set of filenodes for the file if
1194 # Create the set of filenodes for the file if
1169 # there isn't one already.
1195 # there isn't one already.
1170 ndset = msng_filenode_set.setdefault(f, {})
1196 ndset = msng_filenode_set.setdefault(f, {})
1171 # And set the filenode's changelog node to the
1197 # And set the filenode's changelog node to the
1172 # manifest's if it hasn't been set already.
1198 # manifest's if it hasn't been set already.
1173 ndset.setdefault(fnode, clnode)
1199 ndset.setdefault(fnode, clnode)
1174 else:
1200 else:
1175 # Otherwise we need a full manifest.
1201 # Otherwise we need a full manifest.
1176 m = mnfst.read(mnfstnode)
1202 m = mnfst.read(mnfstnode)
1177 # For every file in we care about.
1203 # For every file in we care about.
1178 for f in changedfiles:
1204 for f in changedfiles:
1179 fnode = m.get(f, None)
1205 fnode = m.get(f, None)
1180 # If it's in the manifest
1206 # If it's in the manifest
1181 if fnode is not None:
1207 if fnode is not None:
1182 # See comments above.
1208 # See comments above.
1183 clnode = msng_mnfst_set[mnfstnode]
1209 clnode = msng_mnfst_set[mnfstnode]
1184 ndset = msng_filenode_set.setdefault(f, {})
1210 ndset = msng_filenode_set.setdefault(f, {})
1185 ndset.setdefault(fnode, clnode)
1211 ndset.setdefault(fnode, clnode)
1186 # Remember the revision we hope to see next.
1212 # Remember the revision we hope to see next.
1187 next_rev[0] = r + 1
1213 next_rev[0] = r + 1
1188 return collect_msng_filenodes
1214 return collect_msng_filenodes
1189
1215
1190 # We have a list of filenodes we think we need for a file, lets remove
1216 # We have a list of filenodes we think we need for a file, lets remove
1191 # all those we now the recipient must have.
1217 # all those we now the recipient must have.
1192 def prune_filenodes(f, filerevlog):
1218 def prune_filenodes(f, filerevlog):
1193 msngset = msng_filenode_set[f]
1219 msngset = msng_filenode_set[f]
1194 hasset = {}
1220 hasset = {}
1195 # If a 'missing' filenode thinks it belongs to a changenode we
1221 # If a 'missing' filenode thinks it belongs to a changenode we
1196 # assume the recipient must have, then the recipient must have
1222 # assume the recipient must have, then the recipient must have
1197 # that filenode.
1223 # that filenode.
1198 for n in msngset:
1224 for n in msngset:
1199 clnode = cl.node(filerevlog.linkrev(n))
1225 clnode = cl.node(filerevlog.linkrev(n))
1200 if clnode in has_cl_set:
1226 if clnode in has_cl_set:
1201 hasset[n] = 1
1227 hasset[n] = 1
1202 prune_parents(filerevlog, hasset, msngset)
1228 prune_parents(filerevlog, hasset, msngset)
1203
1229
1204 # A function generator function that sets up the a context for the
1230 # A function generator function that sets up the a context for the
1205 # inner function.
1231 # inner function.
1206 def lookup_filenode_link_func(fname):
1232 def lookup_filenode_link_func(fname):
1207 msngset = msng_filenode_set[fname]
1233 msngset = msng_filenode_set[fname]
1208 # Lookup the changenode the filenode belongs to.
1234 # Lookup the changenode the filenode belongs to.
1209 def lookup_filenode_link(fnode):
1235 def lookup_filenode_link(fnode):
1210 return msngset[fnode]
1236 return msngset[fnode]
1211 return lookup_filenode_link
1237 return lookup_filenode_link
1212
1238
1213 # Now that we have all theses utility functions to help out and
1239 # Now that we have all theses utility functions to help out and
1214 # logically divide up the task, generate the group.
1240 # logically divide up the task, generate the group.
1215 def gengroup():
1241 def gengroup():
1216 # The set of changed files starts empty.
1242 # The set of changed files starts empty.
1217 changedfiles = {}
1243 changedfiles = {}
1218 # Create a changenode group generator that will call our functions
1244 # Create a changenode group generator that will call our functions
1219 # back to lookup the owning changenode and collect information.
1245 # back to lookup the owning changenode and collect information.
1220 group = cl.group(msng_cl_lst, identity,
1246 group = cl.group(msng_cl_lst, identity,
1221 manifest_and_file_collector(changedfiles))
1247 manifest_and_file_collector(changedfiles))
1222 for chnk in group:
1248 for chnk in group:
1223 yield chnk
1249 yield chnk
1224
1250
1225 # The list of manifests has been collected by the generator
1251 # The list of manifests has been collected by the generator
1226 # calling our functions back.
1252 # calling our functions back.
1227 prune_manifests()
1253 prune_manifests()
1228 msng_mnfst_lst = msng_mnfst_set.keys()
1254 msng_mnfst_lst = msng_mnfst_set.keys()
1229 # Sort the manifestnodes by revision number.
1255 # Sort the manifestnodes by revision number.
1230 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1256 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1231 # Create a generator for the manifestnodes that calls our lookup
1257 # Create a generator for the manifestnodes that calls our lookup
1232 # and data collection functions back.
1258 # and data collection functions back.
1233 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1259 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1234 filenode_collector(changedfiles))
1260 filenode_collector(changedfiles))
1235 for chnk in group:
1261 for chnk in group:
1236 yield chnk
1262 yield chnk
1237
1263
1238 # These are no longer needed, dereference and toss the memory for
1264 # These are no longer needed, dereference and toss the memory for
1239 # them.
1265 # them.
1240 msng_mnfst_lst = None
1266 msng_mnfst_lst = None
1241 msng_mnfst_set.clear()
1267 msng_mnfst_set.clear()
1242
1268
1243 changedfiles = changedfiles.keys()
1269 changedfiles = changedfiles.keys()
1244 changedfiles.sort()
1270 changedfiles.sort()
1245 # Go through all our files in order sorted by name.
1271 # Go through all our files in order sorted by name.
1246 for fname in changedfiles:
1272 for fname in changedfiles:
1247 filerevlog = self.file(fname)
1273 filerevlog = self.file(fname)
1248 # Toss out the filenodes that the recipient isn't really
1274 # Toss out the filenodes that the recipient isn't really
1249 # missing.
1275 # missing.
1250 if msng_filenode_set.has_key(fname):
1276 if msng_filenode_set.has_key(fname):
1251 prune_filenodes(fname, filerevlog)
1277 prune_filenodes(fname, filerevlog)
1252 msng_filenode_lst = msng_filenode_set[fname].keys()
1278 msng_filenode_lst = msng_filenode_set[fname].keys()
1253 else:
1279 else:
1254 msng_filenode_lst = []
1280 msng_filenode_lst = []
1255 # If any filenodes are left, generate the group for them,
1281 # If any filenodes are left, generate the group for them,
1256 # otherwise don't bother.
1282 # otherwise don't bother.
1257 if len(msng_filenode_lst) > 0:
1283 if len(msng_filenode_lst) > 0:
1258 yield changegroup.genchunk(fname)
1284 yield changegroup.genchunk(fname)
1259 # Sort the filenodes by their revision #
1285 # Sort the filenodes by their revision #
1260 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1286 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1261 # Create a group generator and only pass in a changenode
1287 # Create a group generator and only pass in a changenode
1262 # lookup function as we need to collect no information
1288 # lookup function as we need to collect no information
1263 # from filenodes.
1289 # from filenodes.
1264 group = filerevlog.group(msng_filenode_lst,
1290 group = filerevlog.group(msng_filenode_lst,
1265 lookup_filenode_link_func(fname))
1291 lookup_filenode_link_func(fname))
1266 for chnk in group:
1292 for chnk in group:
1267 yield chnk
1293 yield chnk
1268 if msng_filenode_set.has_key(fname):
1294 if msng_filenode_set.has_key(fname):
1269 # Don't need this anymore, toss it to free memory.
1295 # Don't need this anymore, toss it to free memory.
1270 del msng_filenode_set[fname]
1296 del msng_filenode_set[fname]
1271 # Signal that no more groups are left.
1297 # Signal that no more groups are left.
1272 yield changegroup.closechunk()
1298 yield changegroup.closechunk()
1273
1299
1274 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1300 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1275
1301
1276 return util.chunkbuffer(gengroup())
1302 return util.chunkbuffer(gengroup())
1277
1303
1278 def changegroup(self, basenodes, source):
1304 def changegroup(self, basenodes, source):
1279 """Generate a changegroup of all nodes that we have that a recipient
1305 """Generate a changegroup of all nodes that we have that a recipient
1280 doesn't.
1306 doesn't.
1281
1307
1282 This is much easier than the previous function as we can assume that
1308 This is much easier than the previous function as we can assume that
1283 the recipient has any changenode we aren't sending them."""
1309 the recipient has any changenode we aren't sending them."""
1284
1310
1285 self.hook('preoutgoing', throw=True, source=source)
1311 self.hook('preoutgoing', throw=True, source=source)
1286
1312
1287 cl = self.changelog
1313 cl = self.changelog
1288 nodes = cl.nodesbetween(basenodes, None)[0]
1314 nodes = cl.nodesbetween(basenodes, None)[0]
1289 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1315 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1290
1316
1291 def identity(x):
1317 def identity(x):
1292 return x
1318 return x
1293
1319
1294 def gennodelst(revlog):
1320 def gennodelst(revlog):
1295 for r in xrange(0, revlog.count()):
1321 for r in xrange(0, revlog.count()):
1296 n = revlog.node(r)
1322 n = revlog.node(r)
1297 if revlog.linkrev(n) in revset:
1323 if revlog.linkrev(n) in revset:
1298 yield n
1324 yield n
1299
1325
1300 def changed_file_collector(changedfileset):
1326 def changed_file_collector(changedfileset):
1301 def collect_changed_files(clnode):
1327 def collect_changed_files(clnode):
1302 c = cl.read(clnode)
1328 c = cl.read(clnode)
1303 for fname in c[3]:
1329 for fname in c[3]:
1304 changedfileset[fname] = 1
1330 changedfileset[fname] = 1
1305 return collect_changed_files
1331 return collect_changed_files
1306
1332
1307 def lookuprevlink_func(revlog):
1333 def lookuprevlink_func(revlog):
1308 def lookuprevlink(n):
1334 def lookuprevlink(n):
1309 return cl.node(revlog.linkrev(n))
1335 return cl.node(revlog.linkrev(n))
1310 return lookuprevlink
1336 return lookuprevlink
1311
1337
1312 def gengroup():
1338 def gengroup():
1313 # construct a list of all changed files
1339 # construct a list of all changed files
1314 changedfiles = {}
1340 changedfiles = {}
1315
1341
1316 for chnk in cl.group(nodes, identity,
1342 for chnk in cl.group(nodes, identity,
1317 changed_file_collector(changedfiles)):
1343 changed_file_collector(changedfiles)):
1318 yield chnk
1344 yield chnk
1319 changedfiles = changedfiles.keys()
1345 changedfiles = changedfiles.keys()
1320 changedfiles.sort()
1346 changedfiles.sort()
1321
1347
1322 mnfst = self.manifest
1348 mnfst = self.manifest
1323 nodeiter = gennodelst(mnfst)
1349 nodeiter = gennodelst(mnfst)
1324 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1350 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1325 yield chnk
1351 yield chnk
1326
1352
1327 for fname in changedfiles:
1353 for fname in changedfiles:
1328 filerevlog = self.file(fname)
1354 filerevlog = self.file(fname)
1329 nodeiter = gennodelst(filerevlog)
1355 nodeiter = gennodelst(filerevlog)
1330 nodeiter = list(nodeiter)
1356 nodeiter = list(nodeiter)
1331 if nodeiter:
1357 if nodeiter:
1332 yield changegroup.genchunk(fname)
1358 yield changegroup.genchunk(fname)
1333 lookup = lookuprevlink_func(filerevlog)
1359 lookup = lookuprevlink_func(filerevlog)
1334 for chnk in filerevlog.group(nodeiter, lookup):
1360 for chnk in filerevlog.group(nodeiter, lookup):
1335 yield chnk
1361 yield chnk
1336
1362
1337 yield changegroup.closechunk()
1363 yield changegroup.closechunk()
1338 self.hook('outgoing', node=hex(nodes[0]), source=source)
1364 self.hook('outgoing', node=hex(nodes[0]), source=source)
1339
1365
1340 return util.chunkbuffer(gengroup())
1366 return util.chunkbuffer(gengroup())
1341
1367
1342 def addchangegroup(self, source):
1368 def addchangegroup(self, source):
1343 """add changegroup to repo.
1369 """add changegroup to repo.
1344 returns number of heads modified or added + 1."""
1370 returns number of heads modified or added + 1."""
1345
1371
1346 def csmap(x):
1372 def csmap(x):
1347 self.ui.debug(_("add changeset %s\n") % short(x))
1373 self.ui.debug(_("add changeset %s\n") % short(x))
1348 return cl.count()
1374 return cl.count()
1349
1375
1350 def revmap(x):
1376 def revmap(x):
1351 return cl.rev(x)
1377 return cl.rev(x)
1352
1378
1353 if not source:
1379 if not source:
1354 return 0
1380 return 0
1355
1381
1356 self.hook('prechangegroup', throw=True)
1382 self.hook('prechangegroup', throw=True)
1357
1383
1358 changesets = files = revisions = 0
1384 changesets = files = revisions = 0
1359
1385
1360 tr = self.transaction()
1386 tr = self.transaction()
1361
1387
1362 # write changelog and manifest data to temp files so
1388 # write changelog and manifest data to temp files so
1363 # concurrent readers will not see inconsistent view
1389 # concurrent readers will not see inconsistent view
1364 cl = appendfile.appendchangelog(self.opener)
1390 cl = appendfile.appendchangelog(self.opener)
1365
1391
1366 oldheads = len(cl.heads())
1392 oldheads = len(cl.heads())
1367
1393
1368 # pull off the changeset group
1394 # pull off the changeset group
1369 self.ui.status(_("adding changesets\n"))
1395 self.ui.status(_("adding changesets\n"))
1370 co = cl.tip()
1396 co = cl.tip()
1371 chunkiter = changegroup.chunkiter(source)
1397 chunkiter = changegroup.chunkiter(source)
1372 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1398 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1373 cnr, cor = map(cl.rev, (cn, co))
1399 cnr, cor = map(cl.rev, (cn, co))
1374 if cn == nullid:
1400 if cn == nullid:
1375 cnr = cor
1401 cnr = cor
1376 changesets = cnr - cor
1402 changesets = cnr - cor
1377
1403
1378 mf = appendfile.appendmanifest(self.opener)
1404 mf = appendfile.appendmanifest(self.opener)
1379
1405
1380 # pull off the manifest group
1406 # pull off the manifest group
1381 self.ui.status(_("adding manifests\n"))
1407 self.ui.status(_("adding manifests\n"))
1382 mm = mf.tip()
1408 mm = mf.tip()
1383 chunkiter = changegroup.chunkiter(source)
1409 chunkiter = changegroup.chunkiter(source)
1384 mo = mf.addgroup(chunkiter, revmap, tr)
1410 mo = mf.addgroup(chunkiter, revmap, tr)
1385
1411
1386 # process the files
1412 # process the files
1387 self.ui.status(_("adding file changes\n"))
1413 self.ui.status(_("adding file changes\n"))
1388 while 1:
1414 while 1:
1389 f = changegroup.getchunk(source)
1415 f = changegroup.getchunk(source)
1390 if not f:
1416 if not f:
1391 break
1417 break
1392 self.ui.debug(_("adding %s revisions\n") % f)
1418 self.ui.debug(_("adding %s revisions\n") % f)
1393 fl = self.file(f)
1419 fl = self.file(f)
1394 o = fl.count()
1420 o = fl.count()
1395 chunkiter = changegroup.chunkiter(source)
1421 chunkiter = changegroup.chunkiter(source)
1396 n = fl.addgroup(chunkiter, revmap, tr)
1422 n = fl.addgroup(chunkiter, revmap, tr)
1397 revisions += fl.count() - o
1423 revisions += fl.count() - o
1398 files += 1
1424 files += 1
1399
1425
1400 # write order here is important so concurrent readers will see
1426 # write order here is important so concurrent readers will see
1401 # consistent view of repo
1427 # consistent view of repo
1402 mf.writedata()
1428 mf.writedata()
1403 cl.writedata()
1429 cl.writedata()
1404
1430
1405 # make changelog and manifest see real files again
1431 # make changelog and manifest see real files again
1406 self.changelog = changelog.changelog(self.opener)
1432 self.changelog = changelog.changelog(self.opener)
1407 self.manifest = manifest.manifest(self.opener)
1433 self.manifest = manifest.manifest(self.opener)
1408
1434
1409 newheads = len(self.changelog.heads())
1435 newheads = len(self.changelog.heads())
1410 heads = ""
1436 heads = ""
1411 if oldheads and newheads > oldheads:
1437 if oldheads and newheads > oldheads:
1412 heads = _(" (+%d heads)") % (newheads - oldheads)
1438 heads = _(" (+%d heads)") % (newheads - oldheads)
1413
1439
1414 self.ui.status(_("added %d changesets"
1440 self.ui.status(_("added %d changesets"
1415 " with %d changes to %d files%s\n")
1441 " with %d changes to %d files%s\n")
1416 % (changesets, revisions, files, heads))
1442 % (changesets, revisions, files, heads))
1417
1443
1418 self.hook('pretxnchangegroup', throw=True,
1444 self.hook('pretxnchangegroup', throw=True,
1419 node=hex(self.changelog.node(cor+1)))
1445 node=hex(self.changelog.node(cor+1)))
1420
1446
1421 tr.close()
1447 tr.close()
1422
1448
1423 if changesets > 0:
1449 if changesets > 0:
1424 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1450 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1425
1451
1426 for i in range(cor + 1, cnr + 1):
1452 for i in range(cor + 1, cnr + 1):
1427 self.hook("incoming", node=hex(self.changelog.node(i)))
1453 self.hook("incoming", node=hex(self.changelog.node(i)))
1428
1454
1429 return newheads - oldheads + 1
1455 return newheads - oldheads + 1
1430
1456
1431 def update(self, node, allow=False, force=False, choose=None,
1457 def update(self, node, allow=False, force=False, choose=None,
1432 moddirstate=True, forcemerge=False, wlock=None):
1458 moddirstate=True, forcemerge=False, wlock=None):
1433 pl = self.dirstate.parents()
1459 pl = self.dirstate.parents()
1434 if not force and pl[1] != nullid:
1460 if not force and pl[1] != nullid:
1435 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1461 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1436 return 1
1462 return 1
1437
1463
1438 err = False
1464 err = False
1439
1465
1440 p1, p2 = pl[0], node
1466 p1, p2 = pl[0], node
1441 pa = self.changelog.ancestor(p1, p2)
1467 pa = self.changelog.ancestor(p1, p2)
1442 m1n = self.changelog.read(p1)[0]
1468 m1n = self.changelog.read(p1)[0]
1443 m2n = self.changelog.read(p2)[0]
1469 m2n = self.changelog.read(p2)[0]
1444 man = self.manifest.ancestor(m1n, m2n)
1470 man = self.manifest.ancestor(m1n, m2n)
1445 m1 = self.manifest.read(m1n)
1471 m1 = self.manifest.read(m1n)
1446 mf1 = self.manifest.readflags(m1n)
1472 mf1 = self.manifest.readflags(m1n)
1447 m2 = self.manifest.read(m2n).copy()
1473 m2 = self.manifest.read(m2n).copy()
1448 mf2 = self.manifest.readflags(m2n)
1474 mf2 = self.manifest.readflags(m2n)
1449 ma = self.manifest.read(man)
1475 ma = self.manifest.read(man)
1450 mfa = self.manifest.readflags(man)
1476 mfa = self.manifest.readflags(man)
1451
1477
1452 modified, added, removed, deleted, unknown = self.changes()
1478 modified, added, removed, deleted, unknown = self.changes()
1453
1479
1454 # is this a jump, or a merge? i.e. is there a linear path
1480 # is this a jump, or a merge? i.e. is there a linear path
1455 # from p1 to p2?
1481 # from p1 to p2?
1456 linear_path = (pa == p1 or pa == p2)
1482 linear_path = (pa == p1 or pa == p2)
1457
1483
1458 if allow and linear_path:
1484 if allow and linear_path:
1459 raise util.Abort(_("there is nothing to merge, "
1485 raise util.Abort(_("there is nothing to merge, "
1460 "just use 'hg update'"))
1486 "just use 'hg update'"))
1461 if allow and not forcemerge:
1487 if allow and not forcemerge:
1462 if modified or added or removed:
1488 if modified or added or removed:
1463 raise util.Abort(_("outstanding uncommitted changes"))
1489 raise util.Abort(_("outstanding uncommitted changes"))
1464 if not forcemerge and not force:
1490 if not forcemerge and not force:
1465 for f in unknown:
1491 for f in unknown:
1466 if f in m2:
1492 if f in m2:
1467 t1 = self.wread(f)
1493 t1 = self.wread(f)
1468 t2 = self.file(f).read(m2[f])
1494 t2 = self.file(f).read(m2[f])
1469 if cmp(t1, t2) != 0:
1495 if cmp(t1, t2) != 0:
1470 raise util.Abort(_("'%s' already exists in the working"
1496 raise util.Abort(_("'%s' already exists in the working"
1471 " dir and differs from remote") % f)
1497 " dir and differs from remote") % f)
1472
1498
1473 # resolve the manifest to determine which files
1499 # resolve the manifest to determine which files
1474 # we care about merging
1500 # we care about merging
1475 self.ui.note(_("resolving manifests\n"))
1501 self.ui.note(_("resolving manifests\n"))
1476 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1502 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1477 (force, allow, moddirstate, linear_path))
1503 (force, allow, moddirstate, linear_path))
1478 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1504 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1479 (short(man), short(m1n), short(m2n)))
1505 (short(man), short(m1n), short(m2n)))
1480
1506
1481 merge = {}
1507 merge = {}
1482 get = {}
1508 get = {}
1483 remove = []
1509 remove = []
1484
1510
1485 # construct a working dir manifest
1511 # construct a working dir manifest
1486 mw = m1.copy()
1512 mw = m1.copy()
1487 mfw = mf1.copy()
1513 mfw = mf1.copy()
1488 umap = dict.fromkeys(unknown)
1514 umap = dict.fromkeys(unknown)
1489
1515
1490 for f in added + modified + unknown:
1516 for f in added + modified + unknown:
1491 mw[f] = ""
1517 mw[f] = ""
1492 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1518 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1493
1519
1494 if moddirstate and not wlock:
1520 if moddirstate and not wlock:
1495 wlock = self.wlock()
1521 wlock = self.wlock()
1496
1522
1497 for f in deleted + removed:
1523 for f in deleted + removed:
1498 if f in mw:
1524 if f in mw:
1499 del mw[f]
1525 del mw[f]
1500
1526
1501 # If we're jumping between revisions (as opposed to merging),
1527 # If we're jumping between revisions (as opposed to merging),
1502 # and if neither the working directory nor the target rev has
1528 # and if neither the working directory nor the target rev has
1503 # the file, then we need to remove it from the dirstate, to
1529 # the file, then we need to remove it from the dirstate, to
1504 # prevent the dirstate from listing the file when it is no
1530 # prevent the dirstate from listing the file when it is no
1505 # longer in the manifest.
1531 # longer in the manifest.
1506 if moddirstate and linear_path and f not in m2:
1532 if moddirstate and linear_path and f not in m2:
1507 self.dirstate.forget((f,))
1533 self.dirstate.forget((f,))
1508
1534
1509 # Compare manifests
1535 # Compare manifests
1510 for f, n in mw.iteritems():
1536 for f, n in mw.iteritems():
1511 if choose and not choose(f):
1537 if choose and not choose(f):
1512 continue
1538 continue
1513 if f in m2:
1539 if f in m2:
1514 s = 0
1540 s = 0
1515
1541
1516 # is the wfile new since m1, and match m2?
1542 # is the wfile new since m1, and match m2?
1517 if f not in m1:
1543 if f not in m1:
1518 t1 = self.wread(f)
1544 t1 = self.wread(f)
1519 t2 = self.file(f).read(m2[f])
1545 t2 = self.file(f).read(m2[f])
1520 if cmp(t1, t2) == 0:
1546 if cmp(t1, t2) == 0:
1521 n = m2[f]
1547 n = m2[f]
1522 del t1, t2
1548 del t1, t2
1523
1549
1524 # are files different?
1550 # are files different?
1525 if n != m2[f]:
1551 if n != m2[f]:
1526 a = ma.get(f, nullid)
1552 a = ma.get(f, nullid)
1527 # are both different from the ancestor?
1553 # are both different from the ancestor?
1528 if n != a and m2[f] != a:
1554 if n != a and m2[f] != a:
1529 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1555 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1530 # merge executable bits
1556 # merge executable bits
1531 # "if we changed or they changed, change in merge"
1557 # "if we changed or they changed, change in merge"
1532 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1558 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1533 mode = ((a^b) | (a^c)) ^ a
1559 mode = ((a^b) | (a^c)) ^ a
1534 merge[f] = (m1.get(f, nullid), m2[f], mode)
1560 merge[f] = (m1.get(f, nullid), m2[f], mode)
1535 s = 1
1561 s = 1
1536 # are we clobbering?
1562 # are we clobbering?
1537 # is remote's version newer?
1563 # is remote's version newer?
1538 # or are we going back in time?
1564 # or are we going back in time?
1539 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1565 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1540 self.ui.debug(_(" remote %s is newer, get\n") % f)
1566 self.ui.debug(_(" remote %s is newer, get\n") % f)
1541 get[f] = m2[f]
1567 get[f] = m2[f]
1542 s = 1
1568 s = 1
1543 elif f in umap:
1569 elif f in umap:
1544 # this unknown file is the same as the checkout
1570 # this unknown file is the same as the checkout
1545 get[f] = m2[f]
1571 get[f] = m2[f]
1546
1572
1547 if not s and mfw[f] != mf2[f]:
1573 if not s and mfw[f] != mf2[f]:
1548 if force:
1574 if force:
1549 self.ui.debug(_(" updating permissions for %s\n") % f)
1575 self.ui.debug(_(" updating permissions for %s\n") % f)
1550 util.set_exec(self.wjoin(f), mf2[f])
1576 util.set_exec(self.wjoin(f), mf2[f])
1551 else:
1577 else:
1552 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1578 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1553 mode = ((a^b) | (a^c)) ^ a
1579 mode = ((a^b) | (a^c)) ^ a
1554 if mode != b:
1580 if mode != b:
1555 self.ui.debug(_(" updating permissions for %s\n")
1581 self.ui.debug(_(" updating permissions for %s\n")
1556 % f)
1582 % f)
1557 util.set_exec(self.wjoin(f), mode)
1583 util.set_exec(self.wjoin(f), mode)
1558 del m2[f]
1584 del m2[f]
1559 elif f in ma:
1585 elif f in ma:
1560 if n != ma[f]:
1586 if n != ma[f]:
1561 r = _("d")
1587 r = _("d")
1562 if not force and (linear_path or allow):
1588 if not force and (linear_path or allow):
1563 r = self.ui.prompt(
1589 r = self.ui.prompt(
1564 (_(" local changed %s which remote deleted\n") % f) +
1590 (_(" local changed %s which remote deleted\n") % f) +
1565 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1591 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1566 if r == _("d"):
1592 if r == _("d"):
1567 remove.append(f)
1593 remove.append(f)
1568 else:
1594 else:
1569 self.ui.debug(_("other deleted %s\n") % f)
1595 self.ui.debug(_("other deleted %s\n") % f)
1570 remove.append(f) # other deleted it
1596 remove.append(f) # other deleted it
1571 else:
1597 else:
1572 # file is created on branch or in working directory
1598 # file is created on branch or in working directory
1573 if force and f not in umap:
1599 if force and f not in umap:
1574 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1600 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1575 remove.append(f)
1601 remove.append(f)
1576 elif n == m1.get(f, nullid): # same as parent
1602 elif n == m1.get(f, nullid): # same as parent
1577 if p2 == pa: # going backwards?
1603 if p2 == pa: # going backwards?
1578 self.ui.debug(_("remote deleted %s\n") % f)
1604 self.ui.debug(_("remote deleted %s\n") % f)
1579 remove.append(f)
1605 remove.append(f)
1580 else:
1606 else:
1581 self.ui.debug(_("local modified %s, keeping\n") % f)
1607 self.ui.debug(_("local modified %s, keeping\n") % f)
1582 else:
1608 else:
1583 self.ui.debug(_("working dir created %s, keeping\n") % f)
1609 self.ui.debug(_("working dir created %s, keeping\n") % f)
1584
1610
1585 for f, n in m2.iteritems():
1611 for f, n in m2.iteritems():
1586 if choose and not choose(f):
1612 if choose and not choose(f):
1587 continue
1613 continue
1588 if f[0] == "/":
1614 if f[0] == "/":
1589 continue
1615 continue
1590 if f in ma and n != ma[f]:
1616 if f in ma and n != ma[f]:
1591 r = _("k")
1617 r = _("k")
1592 if not force and (linear_path or allow):
1618 if not force and (linear_path or allow):
1593 r = self.ui.prompt(
1619 r = self.ui.prompt(
1594 (_("remote changed %s which local deleted\n") % f) +
1620 (_("remote changed %s which local deleted\n") % f) +
1595 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1621 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1596 if r == _("k"):
1622 if r == _("k"):
1597 get[f] = n
1623 get[f] = n
1598 elif f not in ma:
1624 elif f not in ma:
1599 self.ui.debug(_("remote created %s\n") % f)
1625 self.ui.debug(_("remote created %s\n") % f)
1600 get[f] = n
1626 get[f] = n
1601 else:
1627 else:
1602 if force or p2 == pa: # going backwards?
1628 if force or p2 == pa: # going backwards?
1603 self.ui.debug(_("local deleted %s, recreating\n") % f)
1629 self.ui.debug(_("local deleted %s, recreating\n") % f)
1604 get[f] = n
1630 get[f] = n
1605 else:
1631 else:
1606 self.ui.debug(_("local deleted %s\n") % f)
1632 self.ui.debug(_("local deleted %s\n") % f)
1607
1633
1608 del mw, m1, m2, ma
1634 del mw, m1, m2, ma
1609
1635
1610 if force:
1636 if force:
1611 for f in merge:
1637 for f in merge:
1612 get[f] = merge[f][1]
1638 get[f] = merge[f][1]
1613 merge = {}
1639 merge = {}
1614
1640
1615 if linear_path or force:
1641 if linear_path or force:
1616 # we don't need to do any magic, just jump to the new rev
1642 # we don't need to do any magic, just jump to the new rev
1617 branch_merge = False
1643 branch_merge = False
1618 p1, p2 = p2, nullid
1644 p1, p2 = p2, nullid
1619 else:
1645 else:
1620 if not allow:
1646 if not allow:
1621 self.ui.status(_("this update spans a branch"
1647 self.ui.status(_("this update spans a branch"
1622 " affecting the following files:\n"))
1648 " affecting the following files:\n"))
1623 fl = merge.keys() + get.keys()
1649 fl = merge.keys() + get.keys()
1624 fl.sort()
1650 fl.sort()
1625 for f in fl:
1651 for f in fl:
1626 cf = ""
1652 cf = ""
1627 if f in merge:
1653 if f in merge:
1628 cf = _(" (resolve)")
1654 cf = _(" (resolve)")
1629 self.ui.status(" %s%s\n" % (f, cf))
1655 self.ui.status(" %s%s\n" % (f, cf))
1630 self.ui.warn(_("aborting update spanning branches!\n"))
1656 self.ui.warn(_("aborting update spanning branches!\n"))
1631 self.ui.status(_("(use 'hg merge' to merge across branches"
1657 self.ui.status(_("(use 'hg merge' to merge across branches"
1632 " or '-C' to lose changes)\n"))
1658 " or '-C' to lose changes)\n"))
1633 return 1
1659 return 1
1634 branch_merge = True
1660 branch_merge = True
1635
1661
1636 # get the files we don't need to change
1662 # get the files we don't need to change
1637 files = get.keys()
1663 files = get.keys()
1638 files.sort()
1664 files.sort()
1639 for f in files:
1665 for f in files:
1640 if f[0] == "/":
1666 if f[0] == "/":
1641 continue
1667 continue
1642 self.ui.note(_("getting %s\n") % f)
1668 self.ui.note(_("getting %s\n") % f)
1643 t = self.file(f).read(get[f])
1669 t = self.file(f).read(get[f])
1644 self.wwrite(f, t)
1670 self.wwrite(f, t)
1645 util.set_exec(self.wjoin(f), mf2[f])
1671 util.set_exec(self.wjoin(f), mf2[f])
1646 if moddirstate:
1672 if moddirstate:
1647 if branch_merge:
1673 if branch_merge:
1648 self.dirstate.update([f], 'n', st_mtime=-1)
1674 self.dirstate.update([f], 'n', st_mtime=-1)
1649 else:
1675 else:
1650 self.dirstate.update([f], 'n')
1676 self.dirstate.update([f], 'n')
1651
1677
1652 # merge the tricky bits
1678 # merge the tricky bits
1653 failedmerge = []
1679 failedmerge = []
1654 files = merge.keys()
1680 files = merge.keys()
1655 files.sort()
1681 files.sort()
1656 xp1 = hex(p1)
1682 xp1 = hex(p1)
1657 xp2 = hex(p2)
1683 xp2 = hex(p2)
1658 for f in files:
1684 for f in files:
1659 self.ui.status(_("merging %s\n") % f)
1685 self.ui.status(_("merging %s\n") % f)
1660 my, other, flag = merge[f]
1686 my, other, flag = merge[f]
1661 ret = self.merge3(f, my, other, xp1, xp2)
1687 ret = self.merge3(f, my, other, xp1, xp2)
1662 if ret:
1688 if ret:
1663 err = True
1689 err = True
1664 failedmerge.append(f)
1690 failedmerge.append(f)
1665 util.set_exec(self.wjoin(f), flag)
1691 util.set_exec(self.wjoin(f), flag)
1666 if moddirstate:
1692 if moddirstate:
1667 if branch_merge:
1693 if branch_merge:
1668 # We've done a branch merge, mark this file as merged
1694 # We've done a branch merge, mark this file as merged
1669 # so that we properly record the merger later
1695 # so that we properly record the merger later
1670 self.dirstate.update([f], 'm')
1696 self.dirstate.update([f], 'm')
1671 else:
1697 else:
1672 # We've update-merged a locally modified file, so
1698 # We've update-merged a locally modified file, so
1673 # we set the dirstate to emulate a normal checkout
1699 # we set the dirstate to emulate a normal checkout
1674 # of that file some time in the past. Thus our
1700 # of that file some time in the past. Thus our
1675 # merge will appear as a normal local file
1701 # merge will appear as a normal local file
1676 # modification.
1702 # modification.
1677 f_len = len(self.file(f).read(other))
1703 f_len = len(self.file(f).read(other))
1678 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1704 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1679
1705
1680 remove.sort()
1706 remove.sort()
1681 for f in remove:
1707 for f in remove:
1682 self.ui.note(_("removing %s\n") % f)
1708 self.ui.note(_("removing %s\n") % f)
1683 util.audit_path(f)
1709 util.audit_path(f)
1684 try:
1710 try:
1685 util.unlink(self.wjoin(f))
1711 util.unlink(self.wjoin(f))
1686 except OSError, inst:
1712 except OSError, inst:
1687 if inst.errno != errno.ENOENT:
1713 if inst.errno != errno.ENOENT:
1688 self.ui.warn(_("update failed to remove %s: %s!\n") %
1714 self.ui.warn(_("update failed to remove %s: %s!\n") %
1689 (f, inst.strerror))
1715 (f, inst.strerror))
1690 if moddirstate:
1716 if moddirstate:
1691 if branch_merge:
1717 if branch_merge:
1692 self.dirstate.update(remove, 'r')
1718 self.dirstate.update(remove, 'r')
1693 else:
1719 else:
1694 self.dirstate.forget(remove)
1720 self.dirstate.forget(remove)
1695
1721
1696 if moddirstate:
1722 if moddirstate:
1697 self.dirstate.setparents(p1, p2)
1723 self.dirstate.setparents(p1, p2)
1698
1724
1699 stat = ((len(get), _("updated")),
1725 stat = ((len(get), _("updated")),
1700 (len(merge) - len(failedmerge), _("merged")),
1726 (len(merge) - len(failedmerge), _("merged")),
1701 (len(remove), _("removed")),
1727 (len(remove), _("removed")),
1702 (len(failedmerge), _("unresolved")))
1728 (len(failedmerge), _("unresolved")))
1703 note = ", ".join([_("%d files %s") % s for s in stat])
1729 note = ", ".join([_("%d files %s") % s for s in stat])
1704 self.ui.note("%s\n" % note)
1730 self.ui.note("%s\n" % note)
1705 if moddirstate and branch_merge:
1731 if moddirstate and branch_merge:
1706 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1732 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1707
1733
1708 return err
1734 return err
1709
1735
1710 def merge3(self, fn, my, other, p1, p2):
1736 def merge3(self, fn, my, other, p1, p2):
1711 """perform a 3-way merge in the working directory"""
1737 """perform a 3-way merge in the working directory"""
1712
1738
1713 def temp(prefix, node):
1739 def temp(prefix, node):
1714 pre = "%s~%s." % (os.path.basename(fn), prefix)
1740 pre = "%s~%s." % (os.path.basename(fn), prefix)
1715 (fd, name) = tempfile.mkstemp("", pre)
1741 (fd, name) = tempfile.mkstemp("", pre)
1716 f = os.fdopen(fd, "wb")
1742 f = os.fdopen(fd, "wb")
1717 self.wwrite(fn, fl.read(node), f)
1743 self.wwrite(fn, fl.read(node), f)
1718 f.close()
1744 f.close()
1719 return name
1745 return name
1720
1746
1721 fl = self.file(fn)
1747 fl = self.file(fn)
1722 base = fl.ancestor(my, other)
1748 base = fl.ancestor(my, other)
1723 a = self.wjoin(fn)
1749 a = self.wjoin(fn)
1724 b = temp("base", base)
1750 b = temp("base", base)
1725 c = temp("other", other)
1751 c = temp("other", other)
1726
1752
1727 self.ui.note(_("resolving %s\n") % fn)
1753 self.ui.note(_("resolving %s\n") % fn)
1728 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1754 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1729 (fn, short(my), short(other), short(base)))
1755 (fn, short(my), short(other), short(base)))
1730
1756
1731 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1757 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1732 or "hgmerge")
1758 or "hgmerge")
1733 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1759 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1734 environ={'HG_FILE': fn,
1760 environ={'HG_FILE': fn,
1735 'HG_MY_NODE': p1,
1761 'HG_MY_NODE': p1,
1736 'HG_OTHER_NODE': p2,
1762 'HG_OTHER_NODE': p2,
1737 'HG_FILE_MY_NODE': hex(my),
1763 'HG_FILE_MY_NODE': hex(my),
1738 'HG_FILE_OTHER_NODE': hex(other),
1764 'HG_FILE_OTHER_NODE': hex(other),
1739 'HG_FILE_BASE_NODE': hex(base)})
1765 'HG_FILE_BASE_NODE': hex(base)})
1740 if r:
1766 if r:
1741 self.ui.warn(_("merging %s failed!\n") % fn)
1767 self.ui.warn(_("merging %s failed!\n") % fn)
1742
1768
1743 os.unlink(b)
1769 os.unlink(b)
1744 os.unlink(c)
1770 os.unlink(c)
1745 return r
1771 return r
1746
1772
1747 def verify(self):
1773 def verify(self):
1748 filelinkrevs = {}
1774 filelinkrevs = {}
1749 filenodes = {}
1775 filenodes = {}
1750 changesets = revisions = files = 0
1776 changesets = revisions = files = 0
1751 errors = [0]
1777 errors = [0]
1752 neededmanifests = {}
1778 neededmanifests = {}
1753
1779
1754 def err(msg):
1780 def err(msg):
1755 self.ui.warn(msg + "\n")
1781 self.ui.warn(msg + "\n")
1756 errors[0] += 1
1782 errors[0] += 1
1757
1783
1758 def checksize(obj, name):
1784 def checksize(obj, name):
1759 d = obj.checksize()
1785 d = obj.checksize()
1760 if d[0]:
1786 if d[0]:
1761 err(_("%s data length off by %d bytes") % (name, d[0]))
1787 err(_("%s data length off by %d bytes") % (name, d[0]))
1762 if d[1]:
1788 if d[1]:
1763 err(_("%s index contains %d extra bytes") % (name, d[1]))
1789 err(_("%s index contains %d extra bytes") % (name, d[1]))
1764
1790
1765 seen = {}
1791 seen = {}
1766 self.ui.status(_("checking changesets\n"))
1792 self.ui.status(_("checking changesets\n"))
1767 checksize(self.changelog, "changelog")
1793 checksize(self.changelog, "changelog")
1768
1794
1769 for i in range(self.changelog.count()):
1795 for i in range(self.changelog.count()):
1770 changesets += 1
1796 changesets += 1
1771 n = self.changelog.node(i)
1797 n = self.changelog.node(i)
1772 l = self.changelog.linkrev(n)
1798 l = self.changelog.linkrev(n)
1773 if l != i:
1799 if l != i:
1774 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1800 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1775 if n in seen:
1801 if n in seen:
1776 err(_("duplicate changeset at revision %d") % i)
1802 err(_("duplicate changeset at revision %d") % i)
1777 seen[n] = 1
1803 seen[n] = 1
1778
1804
1779 for p in self.changelog.parents(n):
1805 for p in self.changelog.parents(n):
1780 if p not in self.changelog.nodemap:
1806 if p not in self.changelog.nodemap:
1781 err(_("changeset %s has unknown parent %s") %
1807 err(_("changeset %s has unknown parent %s") %
1782 (short(n), short(p)))
1808 (short(n), short(p)))
1783 try:
1809 try:
1784 changes = self.changelog.read(n)
1810 changes = self.changelog.read(n)
1785 except KeyboardInterrupt:
1811 except KeyboardInterrupt:
1786 self.ui.warn(_("interrupted"))
1812 self.ui.warn(_("interrupted"))
1787 raise
1813 raise
1788 except Exception, inst:
1814 except Exception, inst:
1789 err(_("unpacking changeset %s: %s") % (short(n), inst))
1815 err(_("unpacking changeset %s: %s") % (short(n), inst))
1790 continue
1816 continue
1791
1817
1792 neededmanifests[changes[0]] = n
1818 neededmanifests[changes[0]] = n
1793
1819
1794 for f in changes[3]:
1820 for f in changes[3]:
1795 filelinkrevs.setdefault(f, []).append(i)
1821 filelinkrevs.setdefault(f, []).append(i)
1796
1822
1797 seen = {}
1823 seen = {}
1798 self.ui.status(_("checking manifests\n"))
1824 self.ui.status(_("checking manifests\n"))
1799 checksize(self.manifest, "manifest")
1825 checksize(self.manifest, "manifest")
1800
1826
1801 for i in range(self.manifest.count()):
1827 for i in range(self.manifest.count()):
1802 n = self.manifest.node(i)
1828 n = self.manifest.node(i)
1803 l = self.manifest.linkrev(n)
1829 l = self.manifest.linkrev(n)
1804
1830
1805 if l < 0 or l >= self.changelog.count():
1831 if l < 0 or l >= self.changelog.count():
1806 err(_("bad manifest link (%d) at revision %d") % (l, i))
1832 err(_("bad manifest link (%d) at revision %d") % (l, i))
1807
1833
1808 if n in neededmanifests:
1834 if n in neededmanifests:
1809 del neededmanifests[n]
1835 del neededmanifests[n]
1810
1836
1811 if n in seen:
1837 if n in seen:
1812 err(_("duplicate manifest at revision %d") % i)
1838 err(_("duplicate manifest at revision %d") % i)
1813
1839
1814 seen[n] = 1
1840 seen[n] = 1
1815
1841
1816 for p in self.manifest.parents(n):
1842 for p in self.manifest.parents(n):
1817 if p not in self.manifest.nodemap:
1843 if p not in self.manifest.nodemap:
1818 err(_("manifest %s has unknown parent %s") %
1844 err(_("manifest %s has unknown parent %s") %
1819 (short(n), short(p)))
1845 (short(n), short(p)))
1820
1846
1821 try:
1847 try:
1822 delta = mdiff.patchtext(self.manifest.delta(n))
1848 delta = mdiff.patchtext(self.manifest.delta(n))
1823 except KeyboardInterrupt:
1849 except KeyboardInterrupt:
1824 self.ui.warn(_("interrupted"))
1850 self.ui.warn(_("interrupted"))
1825 raise
1851 raise
1826 except Exception, inst:
1852 except Exception, inst:
1827 err(_("unpacking manifest %s: %s") % (short(n), inst))
1853 err(_("unpacking manifest %s: %s") % (short(n), inst))
1828 continue
1854 continue
1829
1855
1830 try:
1856 try:
1831 ff = [ l.split('\0') for l in delta.splitlines() ]
1857 ff = [ l.split('\0') for l in delta.splitlines() ]
1832 for f, fn in ff:
1858 for f, fn in ff:
1833 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1859 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1834 except (ValueError, TypeError), inst:
1860 except (ValueError, TypeError), inst:
1835 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1861 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1836
1862
1837 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1863 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1838
1864
1839 for m, c in neededmanifests.items():
1865 for m, c in neededmanifests.items():
1840 err(_("Changeset %s refers to unknown manifest %s") %
1866 err(_("Changeset %s refers to unknown manifest %s") %
1841 (short(m), short(c)))
1867 (short(m), short(c)))
1842 del neededmanifests
1868 del neededmanifests
1843
1869
1844 for f in filenodes:
1870 for f in filenodes:
1845 if f not in filelinkrevs:
1871 if f not in filelinkrevs:
1846 err(_("file %s in manifest but not in changesets") % f)
1872 err(_("file %s in manifest but not in changesets") % f)
1847
1873
1848 for f in filelinkrevs:
1874 for f in filelinkrevs:
1849 if f not in filenodes:
1875 if f not in filenodes:
1850 err(_("file %s in changeset but not in manifest") % f)
1876 err(_("file %s in changeset but not in manifest") % f)
1851
1877
1852 self.ui.status(_("checking files\n"))
1878 self.ui.status(_("checking files\n"))
1853 ff = filenodes.keys()
1879 ff = filenodes.keys()
1854 ff.sort()
1880 ff.sort()
1855 for f in ff:
1881 for f in ff:
1856 if f == "/dev/null":
1882 if f == "/dev/null":
1857 continue
1883 continue
1858 files += 1
1884 files += 1
1859 if not f:
1885 if not f:
1860 err(_("file without name in manifest %s") % short(n))
1886 err(_("file without name in manifest %s") % short(n))
1861 continue
1887 continue
1862 fl = self.file(f)
1888 fl = self.file(f)
1863 checksize(fl, f)
1889 checksize(fl, f)
1864
1890
1865 nodes = {nullid: 1}
1891 nodes = {nullid: 1}
1866 seen = {}
1892 seen = {}
1867 for i in range(fl.count()):
1893 for i in range(fl.count()):
1868 revisions += 1
1894 revisions += 1
1869 n = fl.node(i)
1895 n = fl.node(i)
1870
1896
1871 if n in seen:
1897 if n in seen:
1872 err(_("%s: duplicate revision %d") % (f, i))
1898 err(_("%s: duplicate revision %d") % (f, i))
1873 if n not in filenodes[f]:
1899 if n not in filenodes[f]:
1874 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1900 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1875 else:
1901 else:
1876 del filenodes[f][n]
1902 del filenodes[f][n]
1877
1903
1878 flr = fl.linkrev(n)
1904 flr = fl.linkrev(n)
1879 if flr not in filelinkrevs.get(f, []):
1905 if flr not in filelinkrevs.get(f, []):
1880 err(_("%s:%s points to unexpected changeset %d")
1906 err(_("%s:%s points to unexpected changeset %d")
1881 % (f, short(n), flr))
1907 % (f, short(n), flr))
1882 else:
1908 else:
1883 filelinkrevs[f].remove(flr)
1909 filelinkrevs[f].remove(flr)
1884
1910
1885 # verify contents
1911 # verify contents
1886 try:
1912 try:
1887 t = fl.read(n)
1913 t = fl.read(n)
1888 except KeyboardInterrupt:
1914 except KeyboardInterrupt:
1889 self.ui.warn(_("interrupted"))
1915 self.ui.warn(_("interrupted"))
1890 raise
1916 raise
1891 except Exception, inst:
1917 except Exception, inst:
1892 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1918 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1893
1919
1894 # verify parents
1920 # verify parents
1895 (p1, p2) = fl.parents(n)
1921 (p1, p2) = fl.parents(n)
1896 if p1 not in nodes:
1922 if p1 not in nodes:
1897 err(_("file %s:%s unknown parent 1 %s") %
1923 err(_("file %s:%s unknown parent 1 %s") %
1898 (f, short(n), short(p1)))
1924 (f, short(n), short(p1)))
1899 if p2 not in nodes:
1925 if p2 not in nodes:
1900 err(_("file %s:%s unknown parent 2 %s") %
1926 err(_("file %s:%s unknown parent 2 %s") %
1901 (f, short(n), short(p1)))
1927 (f, short(n), short(p1)))
1902 nodes[n] = 1
1928 nodes[n] = 1
1903
1929
1904 # cross-check
1930 # cross-check
1905 for node in filenodes[f]:
1931 for node in filenodes[f]:
1906 err(_("node %s in manifests not in %s") % (hex(node), f))
1932 err(_("node %s in manifests not in %s") % (hex(node), f))
1907
1933
1908 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1934 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1909 (files, changesets, revisions))
1935 (files, changesets, revisions))
1910
1936
1911 if errors[0]:
1937 if errors[0]:
1912 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1938 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1913 return 1
1939 return 1
1914
1940
1915 # used to avoid circular references so destructors work
1941 # used to avoid circular references so destructors work
1916 def aftertrans(base):
1942 def aftertrans(base):
1917 p = base
1943 p = base
1918 def a():
1944 def a():
1919 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1945 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1920 util.rename(os.path.join(p, "journal.dirstate"),
1946 util.rename(os.path.join(p, "journal.dirstate"),
1921 os.path.join(p, "undo.dirstate"))
1947 os.path.join(p, "undo.dirstate"))
1922 return a
1948 return a
1923
1949
@@ -1,28 +1,55 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir a
3 mkdir a
4 cd a
4 cd a
5 hg init
5 hg init
6 echo foo > t1
6 echo foo > t1
7 hg add t1
7 hg add t1
8 hg commit -m "1" -d "1000000 0"
8 hg commit -m "1" -d "1000000 0"
9
9
10 cd ..
10 cd ..
11 hg clone a b
11 hg clone a b
12
12
13 cd a
13 cd a
14 echo foo > t2
14 echo foo > t2
15 hg add t2
15 hg add t2
16 hg commit -m "2" -d "1000000 0"
16 hg commit -m "2" -d "1000000 0"
17
17
18 cd ../b
18 cd ../b
19 echo foo > t3
19 echo foo > t3
20 hg add t3
20 hg add t3
21 hg commit -m "3" -d "1000000 0"
21 hg commit -m "3" -d "1000000 0"
22
22
23 hg push ../a
23 hg push ../a
24 hg pull ../a
24 hg pull ../a
25 hg push ../a
25 hg push ../a
26 hg up -m
26 hg up -m
27 hg commit -m "4" -d "1000000 0"
27 hg commit -m "4" -d "1000000 0"
28 hg push ../a
28 hg push ../a
29 cd ..
30
31 hg init c
32 cd c
33 for i in 0 1 2; do
34 echo $i >> foo
35 hg ci -Am $i -d "1000000 0"
36 done
37 cd ..
38
39 hg clone c d
40 cd d
41 for i in 0 1; do
42 hg co -C $i
43 echo d-$i >> foo
44 hg ci -m d-$i -d "1000000 0"
45 done
46
47 HGMERGE=true hg co -m 3
48 hg ci -m c-d -d "1000000 0"
49
50 hg push ../c
51 hg push -r 2 ../c
52 hg push -r 3 -r 4 ../c
53 hg push -r 5 ../c
54
55 exit 0
@@ -1,21 +1,38 b''
1 pushing to ../a
1 pushing to ../a
2 searching for changes
2 searching for changes
3 abort: unsynced remote changes!
3 abort: unsynced remote changes!
4 (did you forget to sync? use push -f to force)
4 (did you forget to sync? use push -f to force)
5 pulling from ../a
5 pulling from ../a
6 searching for changes
6 searching for changes
7 adding changesets
7 adding changesets
8 adding manifests
8 adding manifests
9 adding file changes
9 adding file changes
10 added 1 changesets with 1 changes to 1 files (+1 heads)
10 added 1 changesets with 1 changes to 1 files (+1 heads)
11 (run 'hg heads' to see heads, 'hg merge' to merge)
11 (run 'hg heads' to see heads, 'hg merge' to merge)
12 pushing to ../a
12 pushing to ../a
13 searching for changes
13 searching for changes
14 abort: push creates new remote branches!
14 abort: push creates new remote branches!
15 (did you forget to merge? use push -f to force)
15 (did you forget to merge? use push -f to force)
16 pushing to ../a
16 pushing to ../a
17 searching for changes
17 searching for changes
18 adding changesets
18 adding changesets
19 adding manifests
19 adding manifests
20 adding file changes
20 adding file changes
21 added 2 changesets with 1 changes to 1 files
21 added 2 changesets with 1 changes to 1 files
22 adding foo
23 merging foo
24 pushing to ../c
25 searching for changes
26 abort: push creates new remote branches!
27 (did you forget to merge? use push -f to force)
28 pushing to ../c
29 searching for changes
30 no changes found
31 pushing to ../c
32 searching for changes
33 abort: push creates new remote branches!
34 (did you forget to merge? use push -f to force)
35 pushing to ../c
36 searching for changes
37 abort: push creates new remote branches!
38 (did you forget to merge? use push -f to force)
General Comments 0
You need to be logged in to leave comments. Login now