##// END OF EJS Templates
n is always 'True'; we can only stop the loop with the break statement
Benoit Boissinot -
r2345:4f7745fc default
parent child Browse files
Show More
@@ -1,2109 +1,2109 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog traceback")
15 demandload(globals(), "revlog traceback")
16
16
17 class localrepository(object):
17 class localrepository(object):
18 def __del__(self):
18 def __del__(self):
19 self.transhandle = None
19 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create, initialize) the repository at 'path'.

        When path is None, walk upward from the current directory until
        a '.hg' directory is found.  Raises repo.RepoError when no
        repository exists and create is false.
        """
        if not path:
            # search upward for a directory containing '.hg'
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding one
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository hgrc overrides inherited ui settings; a missing
        # file is fine
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # pick the revlog format/flags from the [revlog] config section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches (see tags()/nodetags()/wread()/wwrite())
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80
80
    def hook(self, name, throw=False, **args):
        # Run every configured hook whose name (before any ".suffix")
        # matches 'name', in sorted order.  Returns a true value if any
        # hook failed; with throw=True a failure raises util.Abort
        # instead of only warning.
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted attribute path inside the module
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                if self.ui.traceback:
                    traceback.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: a non-zero exit status means failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            # 'python:mod.func' runs in-process, anything else is a shell cmd
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
158
158
159 def tags(self):
159 def tags(self):
160 '''return a mapping of tag to node'''
160 '''return a mapping of tag to node'''
161 if not self.tagscache:
161 if not self.tagscache:
162 self.tagscache = {}
162 self.tagscache = {}
163
163
164 def parsetag(line, context):
164 def parsetag(line, context):
165 if not line:
165 if not line:
166 return
166 return
167 s = l.split(" ", 1)
167 s = l.split(" ", 1)
168 if len(s) != 2:
168 if len(s) != 2:
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 return
170 return
171 node, key = s
171 node, key = s
172 key = key.strip()
172 key = key.strip()
173 try:
173 try:
174 bin_n = bin(node)
174 bin_n = bin(node)
175 except TypeError:
175 except TypeError:
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 (context, node))
177 (context, node))
178 return
178 return
179 if bin_n not in self.changelog.nodemap:
179 if bin_n not in self.changelog.nodemap:
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 (context, key))
181 (context, key))
182 return
182 return
183 self.tagscache[key] = bin_n
183 self.tagscache[key] = bin_n
184
184
185 # read the tags file from each head, ending with the tip,
185 # read the tags file from each head, ending with the tip,
186 # and add each tag found to the map, with "newer" ones
186 # and add each tag found to the map, with "newer" ones
187 # taking precedence
187 # taking precedence
188 heads = self.heads()
188 heads = self.heads()
189 heads.reverse()
189 heads.reverse()
190 fl = self.file(".hgtags")
190 fl = self.file(".hgtags")
191 for node in heads:
191 for node in heads:
192 change = self.changelog.read(node)
192 change = self.changelog.read(node)
193 rev = self.changelog.rev(node)
193 rev = self.changelog.rev(node)
194 fn, ff = self.manifest.find(change[0], '.hgtags')
194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 if fn is None: continue
195 if fn is None: continue
196 count = 0
196 count = 0
197 for l in fl.read(fn).splitlines():
197 for l in fl.read(fn).splitlines():
198 count += 1
198 count += 1
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 (rev, short(node), count))
200 (rev, short(node), count))
201 try:
201 try:
202 f = self.opener("localtags")
202 f = self.opener("localtags")
203 count = 0
203 count = 0
204 for l in f:
204 for l in f:
205 count += 1
205 count += 1
206 parsetag(l, _("localtags, line %d") % count)
206 parsetag(l, _("localtags, line %d") % count)
207 except IOError:
207 except IOError:
208 pass
208 pass
209
209
210 self.tagscache['tip'] = self.changelog.tip()
210 self.tagscache['tip'] = self.changelog.tip()
211
211
212 return self.tagscache
212 return self.tagscache
213
213
214 def tagslist(self):
214 def tagslist(self):
215 '''return a list of tags ordered by revision'''
215 '''return a list of tags ordered by revision'''
216 l = []
216 l = []
217 for t, n in self.tags().items():
217 for t, n in self.tags().items():
218 try:
218 try:
219 r = self.changelog.rev(n)
219 r = self.changelog.rev(n)
220 except:
220 except:
221 r = -2 # sort to the beginning of the list if unknown
221 r = -2 # sort to the beginning of the list if unknown
222 l.append((r, t, n))
222 l.append((r, t, n))
223 l.sort()
223 l.sort()
224 return [(t, n) for r, t, n in l]
224 return [(t, n) for r, t, n in l]
225
225
226 def nodetags(self, node):
226 def nodetags(self, node):
227 '''return the tags associated with a node'''
227 '''return the tags associated with a node'''
228 if not self.nodetagscache:
228 if not self.nodetagscache:
229 self.nodetagscache = {}
229 self.nodetagscache = {}
230 for t, n in self.tags().items():
230 for t, n in self.tags().items():
231 self.nodetagscache.setdefault(n, []).append(t)
231 self.nodetagscache.setdefault(n, []).append(t)
232 return self.nodetagscache.get(node, [])
232 return self.nodetagscache.get(node, [])
233
233
    def lookup(self, key):
        """Resolve 'key' (a tag name or changelog identifier) to a node.

        Raises repo.RepoError when the key matches neither a tag nor
        anything the changelog can resolve.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # deliberately broad: any lookup failure becomes RepoError
                raise repo.RepoError(_("unknown revision '%s'") % key)
242
242
243 def dev(self):
243 def dev(self):
244 return os.stat(self.path).st_dev
244 return os.stat(self.path).st_dev
245
245
246 def local(self):
246 def local(self):
247 return True
247 return True
248
248
249 def join(self, f):
249 def join(self, f):
250 return os.path.join(self.path, f)
250 return os.path.join(self.path, f)
251
251
252 def wjoin(self, f):
252 def wjoin(self, f):
253 return os.path.join(self.root, f)
253 return os.path.join(self.root, f)
254
254
255 def file(self, f):
255 def file(self, f):
256 if f[0] == '/':
256 if f[0] == '/':
257 f = f[1:]
257 f = f[1:]
258 return filelog.filelog(self.opener, f, self.revlogversion)
258 return filelog.filelog(self.opener, f, self.revlogversion)
259
259
260 def getcwd(self):
260 def getcwd(self):
261 return self.dirstate.getcwd()
261 return self.dirstate.getcwd()
262
262
263 def wfile(self, f, mode='r'):
263 def wfile(self, f, mode='r'):
264 return self.wopener(f, mode)
264 return self.wopener(f, mode)
265
265
    def wread(self, filename):
        """Read 'filename' from the working directory, applying any
        matching [encode] filter from the configuration."""
        # build the (matcher, command) filter list once and cache it
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
283
283
    def wwrite(self, filename, data, fd=None):
        """Write 'data' to working-directory file 'filename', applying
        any matching [decode] filter first.  When an open file object
        'fd' is given, write there instead of opening the file."""
        # build the (matcher, command) filter list once and cache it
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
301
301
    def transaction(self):
        """Return a (possibly nested) transaction on the store.

        If a transaction is already running, nest inside it; otherwise
        save the dirstate for later undo and start a fresh journal.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
319
319
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # caches refer to pre-rollback state; re-read them
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
330
330
    def undo(self, wlock=None):
        """Roll back the last completed transaction and restore the
        dirstate saved alongside it."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both the store caches and the dirstate changed on disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
343
343
344 def wreload(self):
344 def wreload(self):
345 self.dirstate.read()
345 self.dirstate.read()
346
346
347 def reload(self):
347 def reload(self):
348 self.changelog.load()
348 self.changelog.load()
349 self.manifest.load()
349 self.manifest.load()
350 self.tagscache = None
350 self.tagscache = None
351 self.nodetagscache = None
351 self.nodetagscache = None
352
352
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file and return the lock object.

        First tries a non-blocking acquire; if the lock is held and
        'wait' is true, retries with a timeout (ui.timeout config,
        default 600 seconds).  'releasefn'/'acquirefn' are callbacks run
        on release/after acquisition.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
369
369
    def lock(self, wait=1):
        """Take the repository (store) lock; caches reload on acquire."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
373
373
    def wlock(self, wait=1):
        """Take the working-directory lock.

        The dirstate is written back when the lock is released and
        re-read when the lock is acquired.
        """
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
378
378
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """determine whether a new filenode is needed

        Returns (node, None, None) when an existing filenode can be
        reused unchanged, or (None, fp1, fp2) giving the parents to use
        for a new filenode.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
397
397
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit 'files' with the given metadata, bypassing the normal
        working-directory status checks.

        NOTE(review): the dirstate is only updated when the requested
        p1 matches the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                # reuse an existing filenode when nothing changed
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file unreadable: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
453
453
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes from the working directory.

        Returns the new changeset node, or None when there is nothing
        to commit or the edited commit message is empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and the revision it was copied from
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the template shown in the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
580
580
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
582 if node:
582 if node:
583 fdict = dict.fromkeys(files)
583 fdict = dict.fromkeys(files)
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
585 fdict.pop(fn, None)
585 fdict.pop(fn, None)
586 if match(fn):
586 if match(fn):
587 yield 'm', fn
587 yield 'm', fn
588 for fn in fdict:
588 for fn in fdict:
589 if badmatch and badmatch(fn):
589 if badmatch and badmatch(fn):
590 if match(fn):
590 if match(fn):
591 yield 'b', fn
591 yield 'b', fn
592 else:
592 else:
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
594 util.pathto(self.getcwd(), fn), short(node)))
594 util.pathto(self.getcwd(), fn), short(node)))
595 else:
595 else:
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
597 yield src, fn
597 yield src, fn
598
598
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
600 wlock=None, show_ignored=None):
600 wlock=None, show_ignored=None):
601 """return changes between two nodes or node and working directory
601 """return changes between two nodes or node and working directory
602
602
603 If node1 is None, use the first dirstate parent instead.
603 If node1 is None, use the first dirstate parent instead.
604 If node2 is None, compare node1 with working directory.
604 If node2 is None, compare node1 with working directory.
605 """
605 """
606
606
607 def fcmp(fn, mf):
607 def fcmp(fn, mf):
608 t1 = self.wread(fn)
608 t1 = self.wread(fn)
609 t2 = self.file(fn).read(mf.get(fn, nullid))
609 t2 = self.file(fn).read(mf.get(fn, nullid))
610 return cmp(t1, t2)
610 return cmp(t1, t2)
611
611
612 def mfmatches(node):
612 def mfmatches(node):
613 change = self.changelog.read(node)
613 change = self.changelog.read(node)
614 mf = dict(self.manifest.read(change[0]))
614 mf = dict(self.manifest.read(change[0]))
615 for fn in mf.keys():
615 for fn in mf.keys():
616 if not match(fn):
616 if not match(fn):
617 del mf[fn]
617 del mf[fn]
618 return mf
618 return mf
619
619
620 if node1:
620 if node1:
621 # read the manifest from node1 before the manifest from node2,
621 # read the manifest from node1 before the manifest from node2,
622 # so that we'll hit the manifest cache if we're going through
622 # so that we'll hit the manifest cache if we're going through
623 # all the revisions in parent->child order.
623 # all the revisions in parent->child order.
624 mf1 = mfmatches(node1)
624 mf1 = mfmatches(node1)
625
625
626 # are we comparing the working directory?
626 # are we comparing the working directory?
627 if not node2:
627 if not node2:
628 if not wlock:
628 if not wlock:
629 try:
629 try:
630 wlock = self.wlock(wait=0)
630 wlock = self.wlock(wait=0)
631 except lock.LockException:
631 except lock.LockException:
632 wlock = None
632 wlock = None
633 lookup, modified, added, removed, deleted, unknown, ignored = (
633 lookup, modified, added, removed, deleted, unknown, ignored = (
634 self.dirstate.changes(files, match, show_ignored))
634 self.dirstate.changes(files, match, show_ignored))
635
635
636 # are we comparing working dir against its parent?
636 # are we comparing working dir against its parent?
637 if not node1:
637 if not node1:
638 if lookup:
638 if lookup:
639 # do a full compare of any files that might have changed
639 # do a full compare of any files that might have changed
640 mf2 = mfmatches(self.dirstate.parents()[0])
640 mf2 = mfmatches(self.dirstate.parents()[0])
641 for f in lookup:
641 for f in lookup:
642 if fcmp(f, mf2):
642 if fcmp(f, mf2):
643 modified.append(f)
643 modified.append(f)
644 elif wlock is not None:
644 elif wlock is not None:
645 self.dirstate.update([f], "n")
645 self.dirstate.update([f], "n")
646 else:
646 else:
647 # we are comparing working dir against non-parent
647 # we are comparing working dir against non-parent
648 # generate a pseudo-manifest for the working dir
648 # generate a pseudo-manifest for the working dir
649 mf2 = mfmatches(self.dirstate.parents()[0])
649 mf2 = mfmatches(self.dirstate.parents()[0])
650 for f in lookup + modified + added:
650 for f in lookup + modified + added:
651 mf2[f] = ""
651 mf2[f] = ""
652 for f in removed:
652 for f in removed:
653 if f in mf2:
653 if f in mf2:
654 del mf2[f]
654 del mf2[f]
655 else:
655 else:
656 # we are comparing two revisions
656 # we are comparing two revisions
657 deleted, unknown, ignored = [], [], []
657 deleted, unknown, ignored = [], [], []
658 mf2 = mfmatches(node2)
658 mf2 = mfmatches(node2)
659
659
660 if node1:
660 if node1:
661 # flush lists from dirstate before comparing manifests
661 # flush lists from dirstate before comparing manifests
662 modified, added = [], []
662 modified, added = [], []
663
663
664 for fn in mf2:
664 for fn in mf2:
665 if mf1.has_key(fn):
665 if mf1.has_key(fn):
666 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
666 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
667 modified.append(fn)
667 modified.append(fn)
668 del mf1[fn]
668 del mf1[fn]
669 else:
669 else:
670 added.append(fn)
670 added.append(fn)
671
671
672 removed = mf1.keys()
672 removed = mf1.keys()
673
673
674 # sort and return results:
674 # sort and return results:
675 for l in modified, added, removed, deleted, unknown, ignored:
675 for l in modified, added, removed, deleted, unknown, ignored:
676 l.sort()
676 l.sort()
677 if show_ignored is None:
677 if show_ignored is None:
678 return (modified, added, removed, deleted, unknown)
678 return (modified, added, removed, deleted, unknown)
679 else:
679 else:
680 return (modified, added, removed, deleted, unknown, ignored)
680 return (modified, added, removed, deleted, unknown, ignored)
681
681
682 def add(self, list, wlock=None):
682 def add(self, list, wlock=None):
683 if not wlock:
683 if not wlock:
684 wlock = self.wlock()
684 wlock = self.wlock()
685 for f in list:
685 for f in list:
686 p = self.wjoin(f)
686 p = self.wjoin(f)
687 if not os.path.exists(p):
687 if not os.path.exists(p):
688 self.ui.warn(_("%s does not exist!\n") % f)
688 self.ui.warn(_("%s does not exist!\n") % f)
689 elif not os.path.isfile(p):
689 elif not os.path.isfile(p):
690 self.ui.warn(_("%s not added: only files supported currently\n")
690 self.ui.warn(_("%s not added: only files supported currently\n")
691 % f)
691 % f)
692 elif self.dirstate.state(f) in 'an':
692 elif self.dirstate.state(f) in 'an':
693 self.ui.warn(_("%s already tracked!\n") % f)
693 self.ui.warn(_("%s already tracked!\n") % f)
694 else:
694 else:
695 self.dirstate.update([f], "a")
695 self.dirstate.update([f], "a")
696
696
697 def forget(self, list, wlock=None):
697 def forget(self, list, wlock=None):
698 if not wlock:
698 if not wlock:
699 wlock = self.wlock()
699 wlock = self.wlock()
700 for f in list:
700 for f in list:
701 if self.dirstate.state(f) not in 'ai':
701 if self.dirstate.state(f) not in 'ai':
702 self.ui.warn(_("%s not added!\n") % f)
702 self.ui.warn(_("%s not added!\n") % f)
703 else:
703 else:
704 self.dirstate.forget([f])
704 self.dirstate.forget([f])
705
705
706 def remove(self, list, unlink=False, wlock=None):
706 def remove(self, list, unlink=False, wlock=None):
707 if unlink:
707 if unlink:
708 for f in list:
708 for f in list:
709 try:
709 try:
710 util.unlink(self.wjoin(f))
710 util.unlink(self.wjoin(f))
711 except OSError, inst:
711 except OSError, inst:
712 if inst.errno != errno.ENOENT:
712 if inst.errno != errno.ENOENT:
713 raise
713 raise
714 if not wlock:
714 if not wlock:
715 wlock = self.wlock()
715 wlock = self.wlock()
716 for f in list:
716 for f in list:
717 p = self.wjoin(f)
717 p = self.wjoin(f)
718 if os.path.exists(p):
718 if os.path.exists(p):
719 self.ui.warn(_("%s still exists!\n") % f)
719 self.ui.warn(_("%s still exists!\n") % f)
720 elif self.dirstate.state(f) == 'a':
720 elif self.dirstate.state(f) == 'a':
721 self.dirstate.forget([f])
721 self.dirstate.forget([f])
722 elif f not in self.dirstate:
722 elif f not in self.dirstate:
723 self.ui.warn(_("%s not tracked!\n") % f)
723 self.ui.warn(_("%s not tracked!\n") % f)
724 else:
724 else:
725 self.dirstate.update([f], "r")
725 self.dirstate.update([f], "r")
726
726
727 def undelete(self, list, wlock=None):
727 def undelete(self, list, wlock=None):
728 p = self.dirstate.parents()[0]
728 p = self.dirstate.parents()[0]
729 mn = self.changelog.read(p)[0]
729 mn = self.changelog.read(p)[0]
730 mf = self.manifest.readflags(mn)
730 mf = self.manifest.readflags(mn)
731 m = self.manifest.read(mn)
731 m = self.manifest.read(mn)
732 if not wlock:
732 if not wlock:
733 wlock = self.wlock()
733 wlock = self.wlock()
734 for f in list:
734 for f in list:
735 if self.dirstate.state(f) not in "r":
735 if self.dirstate.state(f) not in "r":
736 self.ui.warn("%s not removed!\n" % f)
736 self.ui.warn("%s not removed!\n" % f)
737 else:
737 else:
738 t = self.file(f).read(m[f])
738 t = self.file(f).read(m[f])
739 self.wwrite(f, t)
739 self.wwrite(f, t)
740 util.set_exec(self.wjoin(f), mf[f])
740 util.set_exec(self.wjoin(f), mf[f])
741 self.dirstate.update([f], "n")
741 self.dirstate.update([f], "n")
742
742
743 def copy(self, source, dest, wlock=None):
743 def copy(self, source, dest, wlock=None):
744 p = self.wjoin(dest)
744 p = self.wjoin(dest)
745 if not os.path.exists(p):
745 if not os.path.exists(p):
746 self.ui.warn(_("%s does not exist!\n") % dest)
746 self.ui.warn(_("%s does not exist!\n") % dest)
747 elif not os.path.isfile(p):
747 elif not os.path.isfile(p):
748 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
748 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
749 else:
749 else:
750 if not wlock:
750 if not wlock:
751 wlock = self.wlock()
751 wlock = self.wlock()
752 if self.dirstate.state(dest) == '?':
752 if self.dirstate.state(dest) == '?':
753 self.dirstate.update([dest], "a")
753 self.dirstate.update([dest], "a")
754 self.dirstate.copy(source, dest)
754 self.dirstate.copy(source, dest)
755
755
756 def heads(self, start=None):
756 def heads(self, start=None):
757 heads = self.changelog.heads(start)
757 heads = self.changelog.heads(start)
758 # sort the output in rev descending order
758 # sort the output in rev descending order
759 heads = [(-self.changelog.rev(h), h) for h in heads]
759 heads = [(-self.changelog.rev(h), h) for h in heads]
760 heads.sort()
760 heads.sort()
761 return [n for (r, n) in heads]
761 return [n for (r, n) in heads]
762
762
763 # branchlookup returns a dict giving a list of branches for
763 # branchlookup returns a dict giving a list of branches for
764 # each head. A branch is defined as the tag of a node or
764 # each head. A branch is defined as the tag of a node or
765 # the branch of the node's parents. If a node has multiple
765 # the branch of the node's parents. If a node has multiple
766 # branch tags, tags are eliminated if they are visible from other
766 # branch tags, tags are eliminated if they are visible from other
767 # branch tags.
767 # branch tags.
768 #
768 #
769 # So, for this graph: a->b->c->d->e
769 # So, for this graph: a->b->c->d->e
770 # \ /
770 # \ /
771 # aa -----/
771 # aa -----/
772 # a has tag 2.6.12
772 # a has tag 2.6.12
773 # d has tag 2.6.13
773 # d has tag 2.6.13
774 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
775 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
776 # from the list.
776 # from the list.
777 #
777 #
778 # It is possible that more than one head will have the same branch tag.
778 # It is possible that more than one head will have the same branch tag.
779 # callers need to check the result for multiple heads under the same
779 # callers need to check the result for multiple heads under the same
780 # branch tag if that is a problem for them (ie checkout of a specific
780 # branch tag if that is a problem for them (ie checkout of a specific
781 # branch).
781 # branch).
782 #
782 #
783 # passing in a specific branch will limit the depth of the search
783 # passing in a specific branch will limit the depth of the search
784 # through the parents. It won't limit the branches returned in the
784 # through the parents. It won't limit the branches returned in the
785 # result though.
785 # result though.
786 def branchlookup(self, heads=None, branch=None):
786 def branchlookup(self, heads=None, branch=None):
787 if not heads:
787 if not heads:
788 heads = self.heads()
788 heads = self.heads()
789 headt = [ h for h in heads ]
789 headt = [ h for h in heads ]
790 chlog = self.changelog
790 chlog = self.changelog
791 branches = {}
791 branches = {}
792 merges = []
792 merges = []
793 seenmerge = {}
793 seenmerge = {}
794
794
795 # traverse the tree once for each head, recording in the branches
795 # traverse the tree once for each head, recording in the branches
796 # dict which tags are visible from this head. The branches
796 # dict which tags are visible from this head. The branches
797 # dict also records which tags are visible from each tag
797 # dict also records which tags are visible from each tag
798 # while we traverse.
798 # while we traverse.
799 while headt or merges:
799 while headt or merges:
800 if merges:
800 if merges:
801 n, found = merges.pop()
801 n, found = merges.pop()
802 visit = [n]
802 visit = [n]
803 else:
803 else:
804 h = headt.pop()
804 h = headt.pop()
805 visit = [h]
805 visit = [h]
806 found = [h]
806 found = [h]
807 seen = {}
807 seen = {}
808 while visit:
808 while visit:
809 n = visit.pop()
809 n = visit.pop()
810 if n in seen:
810 if n in seen:
811 continue
811 continue
812 pp = chlog.parents(n)
812 pp = chlog.parents(n)
813 tags = self.nodetags(n)
813 tags = self.nodetags(n)
814 if tags:
814 if tags:
815 for x in tags:
815 for x in tags:
816 if x == 'tip':
816 if x == 'tip':
817 continue
817 continue
818 for f in found:
818 for f in found:
819 branches.setdefault(f, {})[n] = 1
819 branches.setdefault(f, {})[n] = 1
820 branches.setdefault(n, {})[n] = 1
820 branches.setdefault(n, {})[n] = 1
821 break
821 break
822 if n not in found:
822 if n not in found:
823 found.append(n)
823 found.append(n)
824 if branch in tags:
824 if branch in tags:
825 continue
825 continue
826 seen[n] = 1
826 seen[n] = 1
827 if pp[1] != nullid and n not in seenmerge:
827 if pp[1] != nullid and n not in seenmerge:
828 merges.append((pp[1], [x for x in found]))
828 merges.append((pp[1], [x for x in found]))
829 seenmerge[n] = 1
829 seenmerge[n] = 1
830 if pp[0] != nullid:
830 if pp[0] != nullid:
831 visit.append(pp[0])
831 visit.append(pp[0])
832 # traverse the branches dict, eliminating branch tags from each
832 # traverse the branches dict, eliminating branch tags from each
833 # head that are visible from another branch tag for that head.
833 # head that are visible from another branch tag for that head.
834 out = {}
834 out = {}
835 viscache = {}
835 viscache = {}
836 for h in heads:
836 for h in heads:
837 def visible(node):
837 def visible(node):
838 if node in viscache:
838 if node in viscache:
839 return viscache[node]
839 return viscache[node]
840 ret = {}
840 ret = {}
841 visit = [node]
841 visit = [node]
842 while visit:
842 while visit:
843 x = visit.pop()
843 x = visit.pop()
844 if x in viscache:
844 if x in viscache:
845 ret.update(viscache[x])
845 ret.update(viscache[x])
846 elif x not in ret:
846 elif x not in ret:
847 ret[x] = 1
847 ret[x] = 1
848 if x in branches:
848 if x in branches:
849 visit[len(visit):] = branches[x].keys()
849 visit[len(visit):] = branches[x].keys()
850 viscache[node] = ret
850 viscache[node] = ret
851 return ret
851 return ret
852 if h not in branches:
852 if h not in branches:
853 continue
853 continue
854 # O(n^2), but somewhat limited. This only searches the
854 # O(n^2), but somewhat limited. This only searches the
855 # tags visible from a specific head, not all the tags in the
855 # tags visible from a specific head, not all the tags in the
856 # whole repo.
856 # whole repo.
857 for b in branches[h]:
857 for b in branches[h]:
858 vis = False
858 vis = False
859 for bb in branches[h].keys():
859 for bb in branches[h].keys():
860 if b != bb:
860 if b != bb:
861 if b in visible(bb):
861 if b in visible(bb):
862 vis = True
862 vis = True
863 break
863 break
864 if not vis:
864 if not vis:
865 l = out.setdefault(h, [])
865 l = out.setdefault(h, [])
866 l[len(l):] = self.nodetags(b)
866 l[len(l):] = self.nodetags(b)
867 return out
867 return out
868
868
869 def branches(self, nodes):
869 def branches(self, nodes):
870 if not nodes:
870 if not nodes:
871 nodes = [self.changelog.tip()]
871 nodes = [self.changelog.tip()]
872 b = []
872 b = []
873 for n in nodes:
873 for n in nodes:
874 t = n
874 t = n
875 while n:
875 while 1:
876 p = self.changelog.parents(n)
876 p = self.changelog.parents(n)
877 if p[1] != nullid or p[0] == nullid:
877 if p[1] != nullid or p[0] == nullid:
878 b.append((t, n, p[0], p[1]))
878 b.append((t, n, p[0], p[1]))
879 break
879 break
880 n = p[0]
880 n = p[0]
881 return b
881 return b
882
882
883 def between(self, pairs):
883 def between(self, pairs):
884 r = []
884 r = []
885
885
886 for top, bottom in pairs:
886 for top, bottom in pairs:
887 n, l, i = top, [], 0
887 n, l, i = top, [], 0
888 f = 1
888 f = 1
889
889
890 while n != bottom:
890 while n != bottom:
891 p = self.changelog.parents(n)[0]
891 p = self.changelog.parents(n)[0]
892 if i == f:
892 if i == f:
893 l.append(n)
893 l.append(n)
894 f = f * 2
894 f = f * 2
895 n = p
895 n = p
896 i += 1
896 i += 1
897
897
898 r.append(l)
898 r.append(l)
899
899
900 return r
900 return r
901
901
902 def findincoming(self, remote, base=None, heads=None, force=False):
902 def findincoming(self, remote, base=None, heads=None, force=False):
903 m = self.changelog.nodemap
903 m = self.changelog.nodemap
904 search = []
904 search = []
905 fetch = {}
905 fetch = {}
906 seen = {}
906 seen = {}
907 seenbranch = {}
907 seenbranch = {}
908 if base == None:
908 if base == None:
909 base = {}
909 base = {}
910
910
911 if not heads:
911 if not heads:
912 heads = remote.heads()
912 heads = remote.heads()
913
913
914 if self.changelog.tip() == nullid:
914 if self.changelog.tip() == nullid:
915 if heads != [nullid]:
915 if heads != [nullid]:
916 return [nullid]
916 return [nullid]
917 return []
917 return []
918
918
919 # assume we're closer to the tip than the root
919 # assume we're closer to the tip than the root
920 # and start by examining the heads
920 # and start by examining the heads
921 self.ui.status(_("searching for changes\n"))
921 self.ui.status(_("searching for changes\n"))
922
922
923 unknown = []
923 unknown = []
924 for h in heads:
924 for h in heads:
925 if h not in m:
925 if h not in m:
926 unknown.append(h)
926 unknown.append(h)
927 else:
927 else:
928 base[h] = 1
928 base[h] = 1
929
929
930 if not unknown:
930 if not unknown:
931 return []
931 return []
932
932
933 rep = {}
933 rep = {}
934 reqcnt = 0
934 reqcnt = 0
935
935
936 # search through remote branches
936 # search through remote branches
937 # a 'branch' here is a linear segment of history, with four parts:
937 # a 'branch' here is a linear segment of history, with four parts:
938 # head, root, first parent, second parent
938 # head, root, first parent, second parent
939 # (a branch always has two parents (or none) by definition)
939 # (a branch always has two parents (or none) by definition)
940 unknown = remote.branches(unknown)
940 unknown = remote.branches(unknown)
941 while unknown:
941 while unknown:
942 r = []
942 r = []
943 while unknown:
943 while unknown:
944 n = unknown.pop(0)
944 n = unknown.pop(0)
945 if n[0] in seen:
945 if n[0] in seen:
946 continue
946 continue
947
947
948 self.ui.debug(_("examining %s:%s\n")
948 self.ui.debug(_("examining %s:%s\n")
949 % (short(n[0]), short(n[1])))
949 % (short(n[0]), short(n[1])))
950 if n[0] == nullid:
950 if n[0] == nullid:
951 break
951 break
952 if n in seenbranch:
952 if n in seenbranch:
953 self.ui.debug(_("branch already found\n"))
953 self.ui.debug(_("branch already found\n"))
954 continue
954 continue
955 if n[1] and n[1] in m: # do we know the base?
955 if n[1] and n[1] in m: # do we know the base?
956 self.ui.debug(_("found incomplete branch %s:%s\n")
956 self.ui.debug(_("found incomplete branch %s:%s\n")
957 % (short(n[0]), short(n[1])))
957 % (short(n[0]), short(n[1])))
958 search.append(n) # schedule branch range for scanning
958 search.append(n) # schedule branch range for scanning
959 seenbranch[n] = 1
959 seenbranch[n] = 1
960 else:
960 else:
961 if n[1] not in seen and n[1] not in fetch:
961 if n[1] not in seen and n[1] not in fetch:
962 if n[2] in m and n[3] in m:
962 if n[2] in m and n[3] in m:
963 self.ui.debug(_("found new changeset %s\n") %
963 self.ui.debug(_("found new changeset %s\n") %
964 short(n[1]))
964 short(n[1]))
965 fetch[n[1]] = 1 # earliest unknown
965 fetch[n[1]] = 1 # earliest unknown
966 base[n[2]] = 1 # latest known
966 base[n[2]] = 1 # latest known
967 continue
967 continue
968
968
969 for a in n[2:4]:
969 for a in n[2:4]:
970 if a not in rep:
970 if a not in rep:
971 r.append(a)
971 r.append(a)
972 rep[a] = 1
972 rep[a] = 1
973
973
974 seen[n[0]] = 1
974 seen[n[0]] = 1
975
975
976 if r:
976 if r:
977 reqcnt += 1
977 reqcnt += 1
978 self.ui.debug(_("request %d: %s\n") %
978 self.ui.debug(_("request %d: %s\n") %
979 (reqcnt, " ".join(map(short, r))))
979 (reqcnt, " ".join(map(short, r))))
980 for p in range(0, len(r), 10):
980 for p in range(0, len(r), 10):
981 for b in remote.branches(r[p:p+10]):
981 for b in remote.branches(r[p:p+10]):
982 self.ui.debug(_("received %s:%s\n") %
982 self.ui.debug(_("received %s:%s\n") %
983 (short(b[0]), short(b[1])))
983 (short(b[0]), short(b[1])))
984 if b[0] in m:
984 if b[0] in m:
985 self.ui.debug(_("found base node %s\n")
985 self.ui.debug(_("found base node %s\n")
986 % short(b[0]))
986 % short(b[0]))
987 base[b[0]] = 1
987 base[b[0]] = 1
988 elif b[0] not in seen:
988 elif b[0] not in seen:
989 unknown.append(b)
989 unknown.append(b)
990
990
991 # do binary search on the branches we found
991 # do binary search on the branches we found
992 while search:
992 while search:
993 n = search.pop(0)
993 n = search.pop(0)
994 reqcnt += 1
994 reqcnt += 1
995 l = remote.between([(n[0], n[1])])[0]
995 l = remote.between([(n[0], n[1])])[0]
996 l.append(n[1])
996 l.append(n[1])
997 p = n[0]
997 p = n[0]
998 f = 1
998 f = 1
999 for i in l:
999 for i in l:
1000 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1000 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1001 if i in m:
1001 if i in m:
1002 if f <= 2:
1002 if f <= 2:
1003 self.ui.debug(_("found new branch changeset %s\n") %
1003 self.ui.debug(_("found new branch changeset %s\n") %
1004 short(p))
1004 short(p))
1005 fetch[p] = 1
1005 fetch[p] = 1
1006 base[i] = 1
1006 base[i] = 1
1007 else:
1007 else:
1008 self.ui.debug(_("narrowed branch search to %s:%s\n")
1008 self.ui.debug(_("narrowed branch search to %s:%s\n")
1009 % (short(p), short(i)))
1009 % (short(p), short(i)))
1010 search.append((p, i))
1010 search.append((p, i))
1011 break
1011 break
1012 p, f = i, f * 2
1012 p, f = i, f * 2
1013
1013
1014 # sanity check our fetch list
1014 # sanity check our fetch list
1015 for f in fetch.keys():
1015 for f in fetch.keys():
1016 if f in m:
1016 if f in m:
1017 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1017 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1018
1018
1019 if base.keys() == [nullid]:
1019 if base.keys() == [nullid]:
1020 if force:
1020 if force:
1021 self.ui.warn(_("warning: repository is unrelated\n"))
1021 self.ui.warn(_("warning: repository is unrelated\n"))
1022 else:
1022 else:
1023 raise util.Abort(_("repository is unrelated"))
1023 raise util.Abort(_("repository is unrelated"))
1024
1024
1025 self.ui.note(_("found new changesets starting at ") +
1025 self.ui.note(_("found new changesets starting at ") +
1026 " ".join([short(f) for f in fetch]) + "\n")
1026 " ".join([short(f) for f in fetch]) + "\n")
1027
1027
1028 self.ui.debug(_("%d total queries\n") % reqcnt)
1028 self.ui.debug(_("%d total queries\n") % reqcnt)
1029
1029
1030 return fetch.keys()
1030 return fetch.keys()
1031
1031
1032 def findoutgoing(self, remote, base=None, heads=None, force=False):
1032 def findoutgoing(self, remote, base=None, heads=None, force=False):
1033 """Return list of nodes that are roots of subsets not in remote
1033 """Return list of nodes that are roots of subsets not in remote
1034
1034
1035 If base dict is specified, assume that these nodes and their parents
1035 If base dict is specified, assume that these nodes and their parents
1036 exist on the remote side.
1036 exist on the remote side.
1037 If a list of heads is specified, return only nodes which are heads
1037 If a list of heads is specified, return only nodes which are heads
1038 or ancestors of these heads, and return a second element which
1038 or ancestors of these heads, and return a second element which
1039 contains all remote heads which get new children.
1039 contains all remote heads which get new children.
1040 """
1040 """
1041 if base == None:
1041 if base == None:
1042 base = {}
1042 base = {}
1043 self.findincoming(remote, base, heads, force=force)
1043 self.findincoming(remote, base, heads, force=force)
1044
1044
1045 self.ui.debug(_("common changesets up to ")
1045 self.ui.debug(_("common changesets up to ")
1046 + " ".join(map(short, base.keys())) + "\n")
1046 + " ".join(map(short, base.keys())) + "\n")
1047
1047
1048 remain = dict.fromkeys(self.changelog.nodemap)
1048 remain = dict.fromkeys(self.changelog.nodemap)
1049
1049
1050 # prune everything remote has from the tree
1050 # prune everything remote has from the tree
1051 del remain[nullid]
1051 del remain[nullid]
1052 remove = base.keys()
1052 remove = base.keys()
1053 while remove:
1053 while remove:
1054 n = remove.pop(0)
1054 n = remove.pop(0)
1055 if n in remain:
1055 if n in remain:
1056 del remain[n]
1056 del remain[n]
1057 for p in self.changelog.parents(n):
1057 for p in self.changelog.parents(n):
1058 remove.append(p)
1058 remove.append(p)
1059
1059
1060 # find every node whose parents have been pruned
1060 # find every node whose parents have been pruned
1061 subset = []
1061 subset = []
1062 # find every remote head that will get new children
1062 # find every remote head that will get new children
1063 updated_heads = {}
1063 updated_heads = {}
1064 for n in remain:
1064 for n in remain:
1065 p1, p2 = self.changelog.parents(n)
1065 p1, p2 = self.changelog.parents(n)
1066 if p1 not in remain and p2 not in remain:
1066 if p1 not in remain and p2 not in remain:
1067 subset.append(n)
1067 subset.append(n)
1068 if heads:
1068 if heads:
1069 if p1 in heads:
1069 if p1 in heads:
1070 updated_heads[p1] = True
1070 updated_heads[p1] = True
1071 if p2 in heads:
1071 if p2 in heads:
1072 updated_heads[p2] = True
1072 updated_heads[p2] = True
1073
1073
1074 # this is the set of all roots we have to push
1074 # this is the set of all roots we have to push
1075 if heads:
1075 if heads:
1076 return subset, updated_heads.keys()
1076 return subset, updated_heads.keys()
1077 else:
1077 else:
1078 return subset
1078 return subset
1079
1079
1080 def pull(self, remote, heads=None, force=False):
1080 def pull(self, remote, heads=None, force=False):
1081 l = self.lock()
1081 l = self.lock()
1082
1082
1083 fetch = self.findincoming(remote, force=force)
1083 fetch = self.findincoming(remote, force=force)
1084 if fetch == [nullid]:
1084 if fetch == [nullid]:
1085 self.ui.status(_("requesting all changes\n"))
1085 self.ui.status(_("requesting all changes\n"))
1086
1086
1087 if not fetch:
1087 if not fetch:
1088 self.ui.status(_("no changes found\n"))
1088 self.ui.status(_("no changes found\n"))
1089 return 0
1089 return 0
1090
1090
1091 if heads is None:
1091 if heads is None:
1092 cg = remote.changegroup(fetch, 'pull')
1092 cg = remote.changegroup(fetch, 'pull')
1093 else:
1093 else:
1094 cg = remote.changegroupsubset(fetch, heads, 'pull')
1094 cg = remote.changegroupsubset(fetch, heads, 'pull')
1095 return self.addchangegroup(cg, 'pull')
1095 return self.addchangegroup(cg, 'pull')
1096
1096
1097 def push(self, remote, force=False, revs=None):
1097 def push(self, remote, force=False, revs=None):
1098 lock = remote.lock()
1098 lock = remote.lock()
1099
1099
1100 base = {}
1100 base = {}
1101 remote_heads = remote.heads()
1101 remote_heads = remote.heads()
1102 inc = self.findincoming(remote, base, remote_heads, force=force)
1102 inc = self.findincoming(remote, base, remote_heads, force=force)
1103 if not force and inc:
1103 if not force and inc:
1104 self.ui.warn(_("abort: unsynced remote changes!\n"))
1104 self.ui.warn(_("abort: unsynced remote changes!\n"))
1105 self.ui.status(_("(did you forget to sync?"
1105 self.ui.status(_("(did you forget to sync?"
1106 " use push -f to force)\n"))
1106 " use push -f to force)\n"))
1107 return 1
1107 return 1
1108
1108
1109 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1109 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1110 if revs is not None:
1110 if revs is not None:
1111 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1111 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1112 else:
1112 else:
1113 bases, heads = update, self.changelog.heads()
1113 bases, heads = update, self.changelog.heads()
1114
1114
1115 if not bases:
1115 if not bases:
1116 self.ui.status(_("no changes found\n"))
1116 self.ui.status(_("no changes found\n"))
1117 return 1
1117 return 1
1118 elif not force:
1118 elif not force:
1119 # FIXME we don't properly detect creation of new heads
1119 # FIXME we don't properly detect creation of new heads
1120 # in the push -r case, assume the user knows what he's doing
1120 # in the push -r case, assume the user knows what he's doing
1121 if not revs and len(remote_heads) < len(heads) \
1121 if not revs and len(remote_heads) < len(heads) \
1122 and remote_heads != [nullid]:
1122 and remote_heads != [nullid]:
1123 self.ui.warn(_("abort: push creates new remote branches!\n"))
1123 self.ui.warn(_("abort: push creates new remote branches!\n"))
1124 self.ui.status(_("(did you forget to merge?"
1124 self.ui.status(_("(did you forget to merge?"
1125 " use push -f to force)\n"))
1125 " use push -f to force)\n"))
1126 return 1
1126 return 1
1127
1127
1128 if revs is None:
1128 if revs is None:
1129 cg = self.changegroup(update, 'push')
1129 cg = self.changegroup(update, 'push')
1130 else:
1130 else:
1131 cg = self.changegroupsubset(update, revs, 'push')
1131 cg = self.changegroupsubset(update, revs, 'push')
1132 return remote.addchangegroup(cg, 'push')
1132 return remote.addchangegroup(cg, 'push')
1133
1133
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The result is a util.chunkbuffer wrapping a generator that yields
        the raw changegroup chunks."""

        # give hooks a chance to veto the outgoing transfer
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1404
1404
1405 def changegroup(self, basenodes, source):
1405 def changegroup(self, basenodes, source):
1406 """Generate a changegroup of all nodes that we have that a recipient
1406 """Generate a changegroup of all nodes that we have that a recipient
1407 doesn't.
1407 doesn't.
1408
1408
1409 This is much easier than the previous function as we can assume that
1409 This is much easier than the previous function as we can assume that
1410 the recipient has any changenode we aren't sending them."""
1410 the recipient has any changenode we aren't sending them."""
1411
1411
1412 self.hook('preoutgoing', throw=True, source=source)
1412 self.hook('preoutgoing', throw=True, source=source)
1413
1413
1414 cl = self.changelog
1414 cl = self.changelog
1415 nodes = cl.nodesbetween(basenodes, None)[0]
1415 nodes = cl.nodesbetween(basenodes, None)[0]
1416 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1416 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1417
1417
1418 def identity(x):
1418 def identity(x):
1419 return x
1419 return x
1420
1420
1421 def gennodelst(revlog):
1421 def gennodelst(revlog):
1422 for r in xrange(0, revlog.count()):
1422 for r in xrange(0, revlog.count()):
1423 n = revlog.node(r)
1423 n = revlog.node(r)
1424 if revlog.linkrev(n) in revset:
1424 if revlog.linkrev(n) in revset:
1425 yield n
1425 yield n
1426
1426
1427 def changed_file_collector(changedfileset):
1427 def changed_file_collector(changedfileset):
1428 def collect_changed_files(clnode):
1428 def collect_changed_files(clnode):
1429 c = cl.read(clnode)
1429 c = cl.read(clnode)
1430 for fname in c[3]:
1430 for fname in c[3]:
1431 changedfileset[fname] = 1
1431 changedfileset[fname] = 1
1432 return collect_changed_files
1432 return collect_changed_files
1433
1433
1434 def lookuprevlink_func(revlog):
1434 def lookuprevlink_func(revlog):
1435 def lookuprevlink(n):
1435 def lookuprevlink(n):
1436 return cl.node(revlog.linkrev(n))
1436 return cl.node(revlog.linkrev(n))
1437 return lookuprevlink
1437 return lookuprevlink
1438
1438
1439 def gengroup():
1439 def gengroup():
1440 # construct a list of all changed files
1440 # construct a list of all changed files
1441 changedfiles = {}
1441 changedfiles = {}
1442
1442
1443 for chnk in cl.group(nodes, identity,
1443 for chnk in cl.group(nodes, identity,
1444 changed_file_collector(changedfiles)):
1444 changed_file_collector(changedfiles)):
1445 yield chnk
1445 yield chnk
1446 changedfiles = changedfiles.keys()
1446 changedfiles = changedfiles.keys()
1447 changedfiles.sort()
1447 changedfiles.sort()
1448
1448
1449 mnfst = self.manifest
1449 mnfst = self.manifest
1450 nodeiter = gennodelst(mnfst)
1450 nodeiter = gennodelst(mnfst)
1451 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1451 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1452 yield chnk
1452 yield chnk
1453
1453
1454 for fname in changedfiles:
1454 for fname in changedfiles:
1455 filerevlog = self.file(fname)
1455 filerevlog = self.file(fname)
1456 nodeiter = gennodelst(filerevlog)
1456 nodeiter = gennodelst(filerevlog)
1457 nodeiter = list(nodeiter)
1457 nodeiter = list(nodeiter)
1458 if nodeiter:
1458 if nodeiter:
1459 yield changegroup.genchunk(fname)
1459 yield changegroup.genchunk(fname)
1460 lookup = lookuprevlink_func(filerevlog)
1460 lookup = lookuprevlink_func(filerevlog)
1461 for chnk in filerevlog.group(nodeiter, lookup):
1461 for chnk in filerevlog.group(nodeiter, lookup):
1462 yield chnk
1462 yield chnk
1463
1463
1464 yield changegroup.closechunk()
1464 yield changegroup.closechunk()
1465
1465
1466 if nodes:
1466 if nodes:
1467 self.hook('outgoing', node=hex(nodes[0]), source=source)
1467 self.hook('outgoing', node=hex(nodes[0]), source=source)
1468
1468
1469 return util.chunkbuffer(gengroup())
1469 return util.chunkbuffer(gengroup())
1470
1470
1471 def addchangegroup(self, source, srctype):
1471 def addchangegroup(self, source, srctype):
1472 """add changegroup to repo.
1472 """add changegroup to repo.
1473 returns number of heads modified or added + 1."""
1473 returns number of heads modified or added + 1."""
1474
1474
1475 def csmap(x):
1475 def csmap(x):
1476 self.ui.debug(_("add changeset %s\n") % short(x))
1476 self.ui.debug(_("add changeset %s\n") % short(x))
1477 return cl.count()
1477 return cl.count()
1478
1478
1479 def revmap(x):
1479 def revmap(x):
1480 return cl.rev(x)
1480 return cl.rev(x)
1481
1481
1482 if not source:
1482 if not source:
1483 return 0
1483 return 0
1484
1484
1485 self.hook('prechangegroup', throw=True, source=srctype)
1485 self.hook('prechangegroup', throw=True, source=srctype)
1486
1486
1487 changesets = files = revisions = 0
1487 changesets = files = revisions = 0
1488
1488
1489 tr = self.transaction()
1489 tr = self.transaction()
1490
1490
1491 # write changelog and manifest data to temp files so
1491 # write changelog and manifest data to temp files so
1492 # concurrent readers will not see inconsistent view
1492 # concurrent readers will not see inconsistent view
1493 cl = None
1493 cl = None
1494 try:
1494 try:
1495 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1495 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1496
1496
1497 oldheads = len(cl.heads())
1497 oldheads = len(cl.heads())
1498
1498
1499 # pull off the changeset group
1499 # pull off the changeset group
1500 self.ui.status(_("adding changesets\n"))
1500 self.ui.status(_("adding changesets\n"))
1501 co = cl.tip()
1501 co = cl.tip()
1502 chunkiter = changegroup.chunkiter(source)
1502 chunkiter = changegroup.chunkiter(source)
1503 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1503 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1504 cnr, cor = map(cl.rev, (cn, co))
1504 cnr, cor = map(cl.rev, (cn, co))
1505 if cn == nullid:
1505 if cn == nullid:
1506 cnr = cor
1506 cnr = cor
1507 changesets = cnr - cor
1507 changesets = cnr - cor
1508
1508
1509 mf = None
1509 mf = None
1510 try:
1510 try:
1511 mf = appendfile.appendmanifest(self.opener,
1511 mf = appendfile.appendmanifest(self.opener,
1512 self.manifest.version)
1512 self.manifest.version)
1513
1513
1514 # pull off the manifest group
1514 # pull off the manifest group
1515 self.ui.status(_("adding manifests\n"))
1515 self.ui.status(_("adding manifests\n"))
1516 mm = mf.tip()
1516 mm = mf.tip()
1517 chunkiter = changegroup.chunkiter(source)
1517 chunkiter = changegroup.chunkiter(source)
1518 mo = mf.addgroup(chunkiter, revmap, tr)
1518 mo = mf.addgroup(chunkiter, revmap, tr)
1519
1519
1520 # process the files
1520 # process the files
1521 self.ui.status(_("adding file changes\n"))
1521 self.ui.status(_("adding file changes\n"))
1522 while 1:
1522 while 1:
1523 f = changegroup.getchunk(source)
1523 f = changegroup.getchunk(source)
1524 if not f:
1524 if not f:
1525 break
1525 break
1526 self.ui.debug(_("adding %s revisions\n") % f)
1526 self.ui.debug(_("adding %s revisions\n") % f)
1527 fl = self.file(f)
1527 fl = self.file(f)
1528 o = fl.count()
1528 o = fl.count()
1529 chunkiter = changegroup.chunkiter(source)
1529 chunkiter = changegroup.chunkiter(source)
1530 n = fl.addgroup(chunkiter, revmap, tr)
1530 n = fl.addgroup(chunkiter, revmap, tr)
1531 revisions += fl.count() - o
1531 revisions += fl.count() - o
1532 files += 1
1532 files += 1
1533
1533
1534 # write order here is important so concurrent readers will see
1534 # write order here is important so concurrent readers will see
1535 # consistent view of repo
1535 # consistent view of repo
1536 mf.writedata()
1536 mf.writedata()
1537 finally:
1537 finally:
1538 if mf:
1538 if mf:
1539 mf.cleanup()
1539 mf.cleanup()
1540 cl.writedata()
1540 cl.writedata()
1541 finally:
1541 finally:
1542 if cl:
1542 if cl:
1543 cl.cleanup()
1543 cl.cleanup()
1544
1544
1545 # make changelog and manifest see real files again
1545 # make changelog and manifest see real files again
1546 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1546 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1547 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1547 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1548 self.changelog.checkinlinesize(tr)
1548 self.changelog.checkinlinesize(tr)
1549 self.manifest.checkinlinesize(tr)
1549 self.manifest.checkinlinesize(tr)
1550
1550
1551 newheads = len(self.changelog.heads())
1551 newheads = len(self.changelog.heads())
1552 heads = ""
1552 heads = ""
1553 if oldheads and newheads > oldheads:
1553 if oldheads and newheads > oldheads:
1554 heads = _(" (+%d heads)") % (newheads - oldheads)
1554 heads = _(" (+%d heads)") % (newheads - oldheads)
1555
1555
1556 self.ui.status(_("added %d changesets"
1556 self.ui.status(_("added %d changesets"
1557 " with %d changes to %d files%s\n")
1557 " with %d changes to %d files%s\n")
1558 % (changesets, revisions, files, heads))
1558 % (changesets, revisions, files, heads))
1559
1559
1560 if changesets > 0:
1560 if changesets > 0:
1561 self.hook('pretxnchangegroup', throw=True,
1561 self.hook('pretxnchangegroup', throw=True,
1562 node=hex(self.changelog.node(cor+1)), source=srctype)
1562 node=hex(self.changelog.node(cor+1)), source=srctype)
1563
1563
1564 tr.close()
1564 tr.close()
1565
1565
1566 if changesets > 0:
1566 if changesets > 0:
1567 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1567 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1568 source=srctype)
1568 source=srctype)
1569
1569
1570 for i in range(cor + 1, cnr + 1):
1570 for i in range(cor + 1, cnr + 1):
1571 self.hook("incoming", node=hex(self.changelog.node(i)),
1571 self.hook("incoming", node=hex(self.changelog.node(i)),
1572 source=srctype)
1572 source=srctype)
1573
1573
1574 return newheads - oldheads + 1
1574 return newheads - oldheads + 1
1575
1575
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """update the working directory to changeset `node`.

        allow      - permit a cross-branch merge (otherwise spanning a
                     branch aborts with exit code 1)
        force      - override safety checks and clobber local state
        choose     - optional predicate; only files for which choose(f)
                     is true are considered
        moddirstate- when false, compute/apply file changes without
                     touching the dirstate
        forcemerge - skip the uncommitted-changes/unknown-file checks
        wlock      - reuse an already-held working-dir lock
        show_stats - print the updated/merged/removed/unresolved summary

        Returns 1 if an update spanning branches was aborted, otherwise
        a boolean: True if any file merge failed, False on success.
        """
        pl = self.dirstate.parents()
        # refuse to update away from an uncommitted merge (second parent set)
        if not force and pl[1] != nullid:
            raise util.Abort(_("outstanding uncommitted merges"))

        err = False

        # p1 = current working dir parent, p2 = target; pa = their ancestor
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        # copy m2 because entries are deleted from it during the compare below
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))

        # an untracked file that also exists in the target and differs
        # from it would be silently overwritten -- abort instead
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # outcome buckets: merge = needs 3-way merge, get = take remote
        # version, remove = delete from working dir
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                # contents agree but exec bits differ: fix permissions only
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled: drop from m2 so the second loop only sees files
                # that exist remotely but not locally
                del m2[f]
            elif f in ma:
                # file exists locally and in the ancestor, but remote
                # deleted it
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # second pass: files present only in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                # remote changed a file that was deleted locally
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        # forced update: take the remote side of every would-be merge
        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        xp1 = hex(p1)
        xp2 = hex(p2)
        if p2 == nullid: xxp2 = ''
        else: xxp2 = xp2

        self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a later status to re-examine the file
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; warn about anything else
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     " hg update -C %s\n"
                                     " hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
        return err
1874
1874
1875 def merge3(self, fn, my, other, p1, p2):
1875 def merge3(self, fn, my, other, p1, p2):
1876 """perform a 3-way merge in the working directory"""
1876 """perform a 3-way merge in the working directory"""
1877
1877
1878 def temp(prefix, node):
1878 def temp(prefix, node):
1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1881 f = os.fdopen(fd, "wb")
1881 f = os.fdopen(fd, "wb")
1882 self.wwrite(fn, fl.read(node), f)
1882 self.wwrite(fn, fl.read(node), f)
1883 f.close()
1883 f.close()
1884 return name
1884 return name
1885
1885
1886 fl = self.file(fn)
1886 fl = self.file(fn)
1887 base = fl.ancestor(my, other)
1887 base = fl.ancestor(my, other)
1888 a = self.wjoin(fn)
1888 a = self.wjoin(fn)
1889 b = temp("base", base)
1889 b = temp("base", base)
1890 c = temp("other", other)
1890 c = temp("other", other)
1891
1891
1892 self.ui.note(_("resolving %s\n") % fn)
1892 self.ui.note(_("resolving %s\n") % fn)
1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1894 (fn, short(my), short(other), short(base)))
1894 (fn, short(my), short(other), short(base)))
1895
1895
1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1897 or "hgmerge")
1897 or "hgmerge")
1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1899 environ={'HG_FILE': fn,
1899 environ={'HG_FILE': fn,
1900 'HG_MY_NODE': p1,
1900 'HG_MY_NODE': p1,
1901 'HG_OTHER_NODE': p2,
1901 'HG_OTHER_NODE': p2,
1902 'HG_FILE_MY_NODE': hex(my),
1902 'HG_FILE_MY_NODE': hex(my),
1903 'HG_FILE_OTHER_NODE': hex(other),
1903 'HG_FILE_OTHER_NODE': hex(other),
1904 'HG_FILE_BASE_NODE': hex(base)})
1904 'HG_FILE_BASE_NODE': hex(base)})
1905 if r:
1905 if r:
1906 self.ui.warn(_("merging %s failed!\n") % fn)
1906 self.ui.warn(_("merging %s failed!\n") % fn)
1907
1907
1908 os.unlink(b)
1908 os.unlink(b)
1909 os.unlink(c)
1909 os.unlink(c)
1910 return r
1910 return r
1911
1911
1912 def verify(self):
1912 def verify(self):
1913 filelinkrevs = {}
1913 filelinkrevs = {}
1914 filenodes = {}
1914 filenodes = {}
1915 changesets = revisions = files = 0
1915 changesets = revisions = files = 0
1916 errors = [0]
1916 errors = [0]
1917 warnings = [0]
1917 warnings = [0]
1918 neededmanifests = {}
1918 neededmanifests = {}
1919
1919
1920 def err(msg):
1920 def err(msg):
1921 self.ui.warn(msg + "\n")
1921 self.ui.warn(msg + "\n")
1922 errors[0] += 1
1922 errors[0] += 1
1923
1923
1924 def warn(msg):
1924 def warn(msg):
1925 self.ui.warn(msg + "\n")
1925 self.ui.warn(msg + "\n")
1926 warnings[0] += 1
1926 warnings[0] += 1
1927
1927
1928 def checksize(obj, name):
1928 def checksize(obj, name):
1929 d = obj.checksize()
1929 d = obj.checksize()
1930 if d[0]:
1930 if d[0]:
1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1932 if d[1]:
1932 if d[1]:
1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1934
1934
1935 def checkversion(obj, name):
1935 def checkversion(obj, name):
1936 if obj.version != revlog.REVLOGV0:
1936 if obj.version != revlog.REVLOGV0:
1937 if not revlogv1:
1937 if not revlogv1:
1938 warn(_("warning: `%s' uses revlog format 1") % name)
1938 warn(_("warning: `%s' uses revlog format 1") % name)
1939 elif revlogv1:
1939 elif revlogv1:
1940 warn(_("warning: `%s' uses revlog format 0") % name)
1940 warn(_("warning: `%s' uses revlog format 0") % name)
1941
1941
1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1944 self.ui.status(_("repository uses revlog format %d\n") %
1944 self.ui.status(_("repository uses revlog format %d\n") %
1945 (revlogv1 and 1 or 0))
1945 (revlogv1 and 1 or 0))
1946
1946
1947 seen = {}
1947 seen = {}
1948 self.ui.status(_("checking changesets\n"))
1948 self.ui.status(_("checking changesets\n"))
1949 checksize(self.changelog, "changelog")
1949 checksize(self.changelog, "changelog")
1950
1950
1951 for i in range(self.changelog.count()):
1951 for i in range(self.changelog.count()):
1952 changesets += 1
1952 changesets += 1
1953 n = self.changelog.node(i)
1953 n = self.changelog.node(i)
1954 l = self.changelog.linkrev(n)
1954 l = self.changelog.linkrev(n)
1955 if l != i:
1955 if l != i:
1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1957 if n in seen:
1957 if n in seen:
1958 err(_("duplicate changeset at revision %d") % i)
1958 err(_("duplicate changeset at revision %d") % i)
1959 seen[n] = 1
1959 seen[n] = 1
1960
1960
1961 for p in self.changelog.parents(n):
1961 for p in self.changelog.parents(n):
1962 if p not in self.changelog.nodemap:
1962 if p not in self.changelog.nodemap:
1963 err(_("changeset %s has unknown parent %s") %
1963 err(_("changeset %s has unknown parent %s") %
1964 (short(n), short(p)))
1964 (short(n), short(p)))
1965 try:
1965 try:
1966 changes = self.changelog.read(n)
1966 changes = self.changelog.read(n)
1967 except KeyboardInterrupt:
1967 except KeyboardInterrupt:
1968 self.ui.warn(_("interrupted"))
1968 self.ui.warn(_("interrupted"))
1969 raise
1969 raise
1970 except Exception, inst:
1970 except Exception, inst:
1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1972 continue
1972 continue
1973
1973
1974 neededmanifests[changes[0]] = n
1974 neededmanifests[changes[0]] = n
1975
1975
1976 for f in changes[3]:
1976 for f in changes[3]:
1977 filelinkrevs.setdefault(f, []).append(i)
1977 filelinkrevs.setdefault(f, []).append(i)
1978
1978
1979 seen = {}
1979 seen = {}
1980 self.ui.status(_("checking manifests\n"))
1980 self.ui.status(_("checking manifests\n"))
1981 checkversion(self.manifest, "manifest")
1981 checkversion(self.manifest, "manifest")
1982 checksize(self.manifest, "manifest")
1982 checksize(self.manifest, "manifest")
1983
1983
1984 for i in range(self.manifest.count()):
1984 for i in range(self.manifest.count()):
1985 n = self.manifest.node(i)
1985 n = self.manifest.node(i)
1986 l = self.manifest.linkrev(n)
1986 l = self.manifest.linkrev(n)
1987
1987
1988 if l < 0 or l >= self.changelog.count():
1988 if l < 0 or l >= self.changelog.count():
1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
1990
1990
1991 if n in neededmanifests:
1991 if n in neededmanifests:
1992 del neededmanifests[n]
1992 del neededmanifests[n]
1993
1993
1994 if n in seen:
1994 if n in seen:
1995 err(_("duplicate manifest at revision %d") % i)
1995 err(_("duplicate manifest at revision %d") % i)
1996
1996
1997 seen[n] = 1
1997 seen[n] = 1
1998
1998
1999 for p in self.manifest.parents(n):
1999 for p in self.manifest.parents(n):
2000 if p not in self.manifest.nodemap:
2000 if p not in self.manifest.nodemap:
2001 err(_("manifest %s has unknown parent %s") %
2001 err(_("manifest %s has unknown parent %s") %
2002 (short(n), short(p)))
2002 (short(n), short(p)))
2003
2003
2004 try:
2004 try:
2005 delta = mdiff.patchtext(self.manifest.delta(n))
2005 delta = mdiff.patchtext(self.manifest.delta(n))
2006 except KeyboardInterrupt:
2006 except KeyboardInterrupt:
2007 self.ui.warn(_("interrupted"))
2007 self.ui.warn(_("interrupted"))
2008 raise
2008 raise
2009 except Exception, inst:
2009 except Exception, inst:
2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2011 continue
2011 continue
2012
2012
2013 try:
2013 try:
2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2015 for f, fn in ff:
2015 for f, fn in ff:
2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2017 except (ValueError, TypeError), inst:
2017 except (ValueError, TypeError), inst:
2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2019
2019
2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2021
2021
2022 for m, c in neededmanifests.items():
2022 for m, c in neededmanifests.items():
2023 err(_("Changeset %s refers to unknown manifest %s") %
2023 err(_("Changeset %s refers to unknown manifest %s") %
2024 (short(m), short(c)))
2024 (short(m), short(c)))
2025 del neededmanifests
2025 del neededmanifests
2026
2026
2027 for f in filenodes:
2027 for f in filenodes:
2028 if f not in filelinkrevs:
2028 if f not in filelinkrevs:
2029 err(_("file %s in manifest but not in changesets") % f)
2029 err(_("file %s in manifest but not in changesets") % f)
2030
2030
2031 for f in filelinkrevs:
2031 for f in filelinkrevs:
2032 if f not in filenodes:
2032 if f not in filenodes:
2033 err(_("file %s in changeset but not in manifest") % f)
2033 err(_("file %s in changeset but not in manifest") % f)
2034
2034
2035 self.ui.status(_("checking files\n"))
2035 self.ui.status(_("checking files\n"))
2036 ff = filenodes.keys()
2036 ff = filenodes.keys()
2037 ff.sort()
2037 ff.sort()
2038 for f in ff:
2038 for f in ff:
2039 if f == "/dev/null":
2039 if f == "/dev/null":
2040 continue
2040 continue
2041 files += 1
2041 files += 1
2042 if not f:
2042 if not f:
2043 err(_("file without name in manifest %s") % short(n))
2043 err(_("file without name in manifest %s") % short(n))
2044 continue
2044 continue
2045 fl = self.file(f)
2045 fl = self.file(f)
2046 checkversion(fl, f)
2046 checkversion(fl, f)
2047 checksize(fl, f)
2047 checksize(fl, f)
2048
2048
2049 nodes = {nullid: 1}
2049 nodes = {nullid: 1}
2050 seen = {}
2050 seen = {}
2051 for i in range(fl.count()):
2051 for i in range(fl.count()):
2052 revisions += 1
2052 revisions += 1
2053 n = fl.node(i)
2053 n = fl.node(i)
2054
2054
2055 if n in seen:
2055 if n in seen:
2056 err(_("%s: duplicate revision %d") % (f, i))
2056 err(_("%s: duplicate revision %d") % (f, i))
2057 if n not in filenodes[f]:
2057 if n not in filenodes[f]:
2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2059 else:
2059 else:
2060 del filenodes[f][n]
2060 del filenodes[f][n]
2061
2061
2062 flr = fl.linkrev(n)
2062 flr = fl.linkrev(n)
2063 if flr not in filelinkrevs.get(f, []):
2063 if flr not in filelinkrevs.get(f, []):
2064 err(_("%s:%s points to unexpected changeset %d")
2064 err(_("%s:%s points to unexpected changeset %d")
2065 % (f, short(n), flr))
2065 % (f, short(n), flr))
2066 else:
2066 else:
2067 filelinkrevs[f].remove(flr)
2067 filelinkrevs[f].remove(flr)
2068
2068
2069 # verify contents
2069 # verify contents
2070 try:
2070 try:
2071 t = fl.read(n)
2071 t = fl.read(n)
2072 except KeyboardInterrupt:
2072 except KeyboardInterrupt:
2073 self.ui.warn(_("interrupted"))
2073 self.ui.warn(_("interrupted"))
2074 raise
2074 raise
2075 except Exception, inst:
2075 except Exception, inst:
2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2077
2077
2078 # verify parents
2078 # verify parents
2079 (p1, p2) = fl.parents(n)
2079 (p1, p2) = fl.parents(n)
2080 if p1 not in nodes:
2080 if p1 not in nodes:
2081 err(_("file %s:%s unknown parent 1 %s") %
2081 err(_("file %s:%s unknown parent 1 %s") %
2082 (f, short(n), short(p1)))
2082 (f, short(n), short(p1)))
2083 if p2 not in nodes:
2083 if p2 not in nodes:
2084 err(_("file %s:%s unknown parent 2 %s") %
2084 err(_("file %s:%s unknown parent 2 %s") %
2085 (f, short(n), short(p1)))
2085 (f, short(n), short(p1)))
2086 nodes[n] = 1
2086 nodes[n] = 1
2087
2087
2088 # cross-check
2088 # cross-check
2089 for node in filenodes[f]:
2089 for node in filenodes[f]:
2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2091
2091
2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2093 (files, changesets, revisions))
2093 (files, changesets, revisions))
2094
2094
2095 if warnings[0]:
2095 if warnings[0]:
2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2097 if errors[0]:
2097 if errors[0]:
2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2099 return 1
2099 return 1
2100
2100
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under base to undo files."""
    directory = base
    def renamefiles():
        # same order as before: changelog journal first, then dirstate
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(directory, src),
                        os.path.join(directory, dst))
    return renamefiles
2109
2109
General Comments 0
You need to be logged in to leave comments. Login now