##// END OF EJS Templates
fix bug in localrepo.changes....
Vadim Gelfer -
r2478:287b7da4 default
parent child Browse files
Show More
@@ -1,2151 +1,2152
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 demandload(globals(), "appendfile changegroup")
11 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "changelog dirstate filelog manifest repo")
12 demandload(globals(), "changelog dirstate filelog manifest repo")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "os revlog util")
14 demandload(globals(), "os revlog util")
15
15
16 class localrepository(object):
16 class localrepository(object):
17 capabilities = ()
17 capabilities = ()
18
18
    def __del__(self):
        # Drop the reference to any pending transaction object so it is
        # not kept alive by a discarded repository object.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is not given, walk upward from the current directory
        until a directory containing ".hg" is found; raise
        repo.RepoError if none exists.
        """
        if not path:
            # search for a .hg directory in the current dir and parents
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes inside .hg; wopener inside the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # determine the revlog version and flags to use, from the
        # [revlog] config section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags(), nodetags(), wread(), wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81
81
    def hook(self, name, throw=False, **args):
        """Run all configured hooks matching name.

        Hooks come from the [hooks] config section.  Entries starting
        with "python:" are called in-process; everything else is run
        as a shell command.  keyword args are exported to shell hooks
        as HG_* environment variables.  If throw is true, a failing
        hook raises util.Abort; otherwise failures only warn.
        Returns the or-ed "failed" status of all hooks run.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # split "module.sub.func" into module path and attribute path
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk from the top-level package down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command, exporting the keyword args
            # as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # select hooks whose name (before any ".suffix") matches, and
        # run them in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
158
158
159 def tags(self):
159 def tags(self):
160 '''return a mapping of tag to node'''
160 '''return a mapping of tag to node'''
161 if not self.tagscache:
161 if not self.tagscache:
162 self.tagscache = {}
162 self.tagscache = {}
163
163
164 def parsetag(line, context):
164 def parsetag(line, context):
165 if not line:
165 if not line:
166 return
166 return
167 s = l.split(" ", 1)
167 s = l.split(" ", 1)
168 if len(s) != 2:
168 if len(s) != 2:
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 return
170 return
171 node, key = s
171 node, key = s
172 key = key.strip()
172 key = key.strip()
173 try:
173 try:
174 bin_n = bin(node)
174 bin_n = bin(node)
175 except TypeError:
175 except TypeError:
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 (context, node))
177 (context, node))
178 return
178 return
179 if bin_n not in self.changelog.nodemap:
179 if bin_n not in self.changelog.nodemap:
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 (context, key))
181 (context, key))
182 return
182 return
183 self.tagscache[key] = bin_n
183 self.tagscache[key] = bin_n
184
184
185 # read the tags file from each head, ending with the tip,
185 # read the tags file from each head, ending with the tip,
186 # and add each tag found to the map, with "newer" ones
186 # and add each tag found to the map, with "newer" ones
187 # taking precedence
187 # taking precedence
188 heads = self.heads()
188 heads = self.heads()
189 heads.reverse()
189 heads.reverse()
190 fl = self.file(".hgtags")
190 fl = self.file(".hgtags")
191 for node in heads:
191 for node in heads:
192 change = self.changelog.read(node)
192 change = self.changelog.read(node)
193 rev = self.changelog.rev(node)
193 rev = self.changelog.rev(node)
194 fn, ff = self.manifest.find(change[0], '.hgtags')
194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 if fn is None: continue
195 if fn is None: continue
196 count = 0
196 count = 0
197 for l in fl.read(fn).splitlines():
197 for l in fl.read(fn).splitlines():
198 count += 1
198 count += 1
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 (rev, short(node), count))
200 (rev, short(node), count))
201 try:
201 try:
202 f = self.opener("localtags")
202 f = self.opener("localtags")
203 count = 0
203 count = 0
204 for l in f:
204 for l in f:
205 count += 1
205 count += 1
206 parsetag(l, _("localtags, line %d") % count)
206 parsetag(l, _("localtags, line %d") % count)
207 except IOError:
207 except IOError:
208 pass
208 pass
209
209
210 self.tagscache['tip'] = self.changelog.tip()
210 self.tagscache['tip'] = self.changelog.tip()
211
211
212 return self.tagscache
212 return self.tagscache
213
213
214 def tagslist(self):
214 def tagslist(self):
215 '''return a list of tags ordered by revision'''
215 '''return a list of tags ordered by revision'''
216 l = []
216 l = []
217 for t, n in self.tags().items():
217 for t, n in self.tags().items():
218 try:
218 try:
219 r = self.changelog.rev(n)
219 r = self.changelog.rev(n)
220 except:
220 except:
221 r = -2 # sort to the beginning of the list if unknown
221 r = -2 # sort to the beginning of the list if unknown
222 l.append((r, t, n))
222 l.append((r, t, n))
223 l.sort()
223 l.sort()
224 return [(t, n) for r, t, n in l]
224 return [(t, n) for r, t, n in l]
225
225
226 def nodetags(self, node):
226 def nodetags(self, node):
227 '''return the tags associated with a node'''
227 '''return the tags associated with a node'''
228 if not self.nodetagscache:
228 if not self.nodetagscache:
229 self.nodetagscache = {}
229 self.nodetagscache = {}
230 for t, n in self.tags().items():
230 for t, n in self.tags().items():
231 self.nodetagscache.setdefault(n, []).append(t)
231 self.nodetagscache.setdefault(n, []).append(t)
232 return self.nodetagscache.get(node, [])
232 return self.nodetagscache.get(node, [])
233
233
234 def lookup(self, key):
234 def lookup(self, key):
235 try:
235 try:
236 return self.tags()[key]
236 return self.tags()[key]
237 except KeyError:
237 except KeyError:
238 try:
238 try:
239 return self.changelog.lookup(key)
239 return self.changelog.lookup(key)
240 except:
240 except:
241 raise repo.RepoError(_("unknown revision '%s'") % key)
241 raise repo.RepoError(_("unknown revision '%s'") % key)
242
242
    def dev(self):
        # device number of the filesystem holding .hg (lstat, so a
        # symlinked .hg reports the link's own device)
        return os.lstat(self.path).st_dev
245
245
    def local(self):
        # this repository class is always backed by the local filesystem
        return True
248
248
    def join(self, f):
        # path of f relative to the .hg directory
        return os.path.join(self.path, f)
251
251
    def wjoin(self, f):
        # path of f relative to the working directory root
        return os.path.join(self.root, f)
254
254
    def file(self, f):
        """Return the filelog for tracked file f (store paths are
        relative, so a leading slash is stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
259
259
    def getcwd(self):
        # current directory as the dirstate sees it (relative to root)
        return self.dirstate.getcwd()
262
262
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
265
265
266 def wread(self, filename):
266 def wread(self, filename):
267 if self.encodepats == None:
267 if self.encodepats == None:
268 l = []
268 l = []
269 for pat, cmd in self.ui.configitems("encode"):
269 for pat, cmd in self.ui.configitems("encode"):
270 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 l.append((mf, cmd))
271 l.append((mf, cmd))
272 self.encodepats = l
272 self.encodepats = l
273
273
274 data = self.wopener(filename, 'r').read()
274 data = self.wopener(filename, 'r').read()
275
275
276 for mf, cmd in self.encodepats:
276 for mf, cmd in self.encodepats:
277 if mf(filename):
277 if mf(filename):
278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 data = util.filter(data, cmd)
279 data = util.filter(data, cmd)
280 break
280 break
281
281
282 return data
282 return data
283
283
284 def wwrite(self, filename, data, fd=None):
284 def wwrite(self, filename, data, fd=None):
285 if self.decodepats == None:
285 if self.decodepats == None:
286 l = []
286 l = []
287 for pat, cmd in self.ui.configitems("decode"):
287 for pat, cmd in self.ui.configitems("decode"):
288 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 mf = util.matcher(self.root, "", [pat], [], [])[1]
289 l.append((mf, cmd))
289 l.append((mf, cmd))
290 self.decodepats = l
290 self.decodepats = l
291
291
292 for mf, cmd in self.decodepats:
292 for mf, cmd in self.decodepats:
293 if mf(filename):
293 if mf(filename):
294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
295 data = util.filter(data, cmd)
295 data = util.filter(data, cmd)
296 break
296 break
297
297
298 if fd:
298 if fd:
299 return fd.write(data)
299 return fd.write(data)
300 return self.wopener(filename, 'w').write(data)
300 return self.wopener(filename, 'w').write(data)
301
301
    def transaction(self):
        """Return a (possibly nested) transaction on the store.

        If a transaction is already running, return a nested handle to
        it; otherwise snapshot the dirstate for rollback and start a
        new journaled transaction.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo): journal an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # aftertrans renames the journal files to undo.* on close, so
        # the transaction can later be rolled back
        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
319
319
320 def recover(self):
320 def recover(self):
321 l = self.lock()
321 l = self.lock()
322 if os.path.exists(self.join("journal")):
322 if os.path.exists(self.join("journal")):
323 self.ui.status(_("rolling back interrupted transaction\n"))
323 self.ui.status(_("rolling back interrupted transaction\n"))
324 transaction.rollback(self.opener, self.join("journal"))
324 transaction.rollback(self.opener, self.join("journal"))
325 self.reload()
325 self.reload()
326 return True
326 return True
327 else:
327 else:
328 self.ui.warn(_("no interrupted transaction available\n"))
328 self.ui.warn(_("no interrupted transaction available\n"))
329 return False
329 return False
330
330
331 def rollback(self, wlock=None):
331 def rollback(self, wlock=None):
332 if not wlock:
332 if not wlock:
333 wlock = self.wlock()
333 wlock = self.wlock()
334 l = self.lock()
334 l = self.lock()
335 if os.path.exists(self.join("undo")):
335 if os.path.exists(self.join("undo")):
336 self.ui.status(_("rolling back last transaction\n"))
336 self.ui.status(_("rolling back last transaction\n"))
337 transaction.rollback(self.opener, self.join("undo"))
337 transaction.rollback(self.opener, self.join("undo"))
338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
339 self.reload()
339 self.reload()
340 self.wreload()
340 self.wreload()
341 else:
341 else:
342 self.ui.warn(_("no rollback information available\n"))
342 self.ui.warn(_("no rollback information available\n"))
343
343
    def wreload(self):
        # re-read the dirstate from disk (working-dir state)
        self.dirstate.read()
346
346
    def reload(self):
        # re-read store data from disk and drop caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
352
352
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file inside .hg.

        First try a non-blocking acquire; if the lock is held and wait
        is true, warn and retry with a timeout (ui.timeout config,
        default 600 seconds).  acquirefn, if given, is called after the
        lock is obtained; releasefn is passed through to the lock.
        Returns the lock object (released when it is garbage collected
        or explicitly released).
        """
        try:
            # timeout 0 = fail immediately if already held
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
369
369
    def lock(self, wait=1):
        # store lock; reload store data once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
373
373
    def wlock(self, wait=1):
        # working-directory lock; dirstate is written on release and
        # re-read on acquire
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
378
378
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # Returns (entry, None, None) when the existing filenode can be
        # reused, or (None, fp1, fp2) giving the parents for a new one.
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only fp2 as parent
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                # fp2 is an ancestor of fp1: drop it
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
397
397
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given working-directory files with explicit
        parents, bypassing the usual status checks (used for imports).

        The dirstate is only updated when p1 matches the current
        working-directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing on top of the
        # current working-directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged from a parent: reuse its filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
453
453
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes in the working directory.

        If files is given, commit exactly those files (by dirstate
        status); otherwise commit everything reported changed by
        self.changes().  Runs the precommit/pretxncommit/commit hooks.
        Returns the new changeset node, or None if nothing was
        committed (no changes, or empty commit message).
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly-listed files by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 != nullid) is always committed, even if empty
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and source revision in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the commit-message template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
580
580
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
582 if node:
582 if node:
583 fdict = dict.fromkeys(files)
583 fdict = dict.fromkeys(files)
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
585 fdict.pop(fn, None)
585 fdict.pop(fn, None)
586 if match(fn):
586 if match(fn):
587 yield 'm', fn
587 yield 'm', fn
588 for fn in fdict:
588 for fn in fdict:
589 if badmatch and badmatch(fn):
589 if badmatch and badmatch(fn):
590 if match(fn):
590 if match(fn):
591 yield 'b', fn
591 yield 'b', fn
592 else:
592 else:
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
594 util.pathto(self.getcwd(), fn), short(node)))
594 util.pathto(self.getcwd(), fn), short(node)))
595 else:
595 else:
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
597 yield src, fn
597 yield src, fn
598
598
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
600 wlock=None, show_ignored=None):
600 wlock=None, show_ignored=None):
601 """return changes between two nodes or node and working directory
601 """return changes between two nodes or node and working directory
602
602
603 If node1 is None, use the first dirstate parent instead.
603 If node1 is None, use the first dirstate parent instead.
604 If node2 is None, compare node1 with working directory.
604 If node2 is None, compare node1 with working directory.
605 """
605 """
606
606
607 def fcmp(fn, mf):
607 def fcmp(fn, mf):
608 t1 = self.wread(fn)
608 t1 = self.wread(fn)
609 t2 = self.file(fn).read(mf.get(fn, nullid))
609 t2 = self.file(fn).read(mf.get(fn, nullid))
610 return cmp(t1, t2)
610 return cmp(t1, t2)
611
611
612 def mfmatches(node):
612 def mfmatches(node):
613 change = self.changelog.read(node)
613 change = self.changelog.read(node)
614 mf = dict(self.manifest.read(change[0]))
614 mf = dict(self.manifest.read(change[0]))
615 for fn in mf.keys():
615 for fn in mf.keys():
616 if not match(fn):
616 if not match(fn):
617 del mf[fn]
617 del mf[fn]
618 return mf
618 return mf
619
619
620 modified, added, removed, deleted, unknown, ignored = [],[],[],[],[],[]
620 compareworking = False
621 compareworking = False
621 if not node1 or node1 == self.dirstate.parents()[0]:
622 if not node1 or node1 == self.dirstate.parents()[0]:
622 compareworking = True
623 compareworking = True
623
624
624 if not compareworking:
625 if not compareworking:
625 # read the manifest from node1 before the manifest from node2,
626 # read the manifest from node1 before the manifest from node2,
626 # so that we'll hit the manifest cache if we're going through
627 # so that we'll hit the manifest cache if we're going through
627 # all the revisions in parent->child order.
628 # all the revisions in parent->child order.
628 mf1 = mfmatches(node1)
629 mf1 = mfmatches(node1)
629
630
630 # are we comparing the working directory?
631 # are we comparing the working directory?
631 if not node2:
632 if not node2:
632 if not wlock:
633 if not wlock:
633 try:
634 try:
634 wlock = self.wlock(wait=0)
635 wlock = self.wlock(wait=0)
635 except lock.LockException:
636 except lock.LockException:
636 wlock = None
637 wlock = None
637 lookup, modified, added, removed, deleted, unknown, ignored = (
638 lookup, modified, added, removed, deleted, unknown, ignored = (
638 self.dirstate.changes(files, match, show_ignored))
639 self.dirstate.changes(files, match, show_ignored))
639
640
640 # are we comparing working dir against its parent?
641 # are we comparing working dir against its parent?
641 if compareworking:
642 if compareworking:
642 if lookup:
643 if lookup:
643 # do a full compare of any files that might have changed
644 # do a full compare of any files that might have changed
644 mf2 = mfmatches(self.dirstate.parents()[0])
645 mf2 = mfmatches(self.dirstate.parents()[0])
645 for f in lookup:
646 for f in lookup:
646 if fcmp(f, mf2):
647 if fcmp(f, mf2):
647 modified.append(f)
648 modified.append(f)
648 elif wlock is not None:
649 elif wlock is not None:
649 self.dirstate.update([f], "n")
650 self.dirstate.update([f], "n")
650 else:
651 else:
651 # we are comparing working dir against non-parent
652 # we are comparing working dir against non-parent
652 # generate a pseudo-manifest for the working dir
653 # generate a pseudo-manifest for the working dir
653 mf2 = mfmatches(self.dirstate.parents()[0])
654 mf2 = mfmatches(self.dirstate.parents()[0])
654 for f in lookup + modified + added:
655 for f in lookup + modified + added:
655 mf2[f] = ""
656 mf2[f] = ""
656 for f in removed:
657 for f in removed:
657 if f in mf2:
658 if f in mf2:
658 del mf2[f]
659 del mf2[f]
659 else:
660 else:
660 # we are comparing two revisions
661 # we are comparing two revisions
661 deleted, unknown, ignored = [], [], []
662 deleted, unknown, ignored = [], [], []
662 mf2 = mfmatches(node2)
663 mf2 = mfmatches(node2)
663
664
664 if not compareworking:
665 if not compareworking:
665 # flush lists from dirstate before comparing manifests
666 # flush lists from dirstate before comparing manifests
666 modified, added = [], []
667 modified, added = [], []
667
668
668 # make sure to sort the files so we talk to the disk in a
669 # make sure to sort the files so we talk to the disk in a
669 # reasonable order
670 # reasonable order
670 mf2keys = mf2.keys()
671 mf2keys = mf2.keys()
671 mf2keys.sort()
672 mf2keys.sort()
672 for fn in mf2keys:
673 for fn in mf2keys:
673 if mf1.has_key(fn):
674 if mf1.has_key(fn):
674 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
675 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
675 modified.append(fn)
676 modified.append(fn)
676 del mf1[fn]
677 del mf1[fn]
677 else:
678 else:
678 added.append(fn)
679 added.append(fn)
679
680
680 removed = mf1.keys()
681 removed = mf1.keys()
681
682
682 # sort and return results:
683 # sort and return results:
683 for l in modified, added, removed, deleted, unknown, ignored:
684 for l in modified, added, removed, deleted, unknown, ignored:
684 l.sort()
685 l.sort()
685 if show_ignored is None:
686 if show_ignored is None:
686 return (modified, added, removed, deleted, unknown)
687 return (modified, added, removed, deleted, unknown)
687 else:
688 else:
688 return (modified, added, removed, deleted, unknown, ignored)
689 return (modified, added, removed, deleted, unknown, ignored)
689
690
690 def add(self, list, wlock=None):
691 def add(self, list, wlock=None):
691 if not wlock:
692 if not wlock:
692 wlock = self.wlock()
693 wlock = self.wlock()
693 for f in list:
694 for f in list:
694 p = self.wjoin(f)
695 p = self.wjoin(f)
695 if not os.path.exists(p):
696 if not os.path.exists(p):
696 self.ui.warn(_("%s does not exist!\n") % f)
697 self.ui.warn(_("%s does not exist!\n") % f)
697 elif not os.path.isfile(p):
698 elif not os.path.isfile(p):
698 self.ui.warn(_("%s not added: only files supported currently\n")
699 self.ui.warn(_("%s not added: only files supported currently\n")
699 % f)
700 % f)
700 elif self.dirstate.state(f) in 'an':
701 elif self.dirstate.state(f) in 'an':
701 self.ui.warn(_("%s already tracked!\n") % f)
702 self.ui.warn(_("%s already tracked!\n") % f)
702 else:
703 else:
703 self.dirstate.update([f], "a")
704 self.dirstate.update([f], "a")
704
705
705 def forget(self, list, wlock=None):
706 def forget(self, list, wlock=None):
706 if not wlock:
707 if not wlock:
707 wlock = self.wlock()
708 wlock = self.wlock()
708 for f in list:
709 for f in list:
709 if self.dirstate.state(f) not in 'ai':
710 if self.dirstate.state(f) not in 'ai':
710 self.ui.warn(_("%s not added!\n") % f)
711 self.ui.warn(_("%s not added!\n") % f)
711 else:
712 else:
712 self.dirstate.forget([f])
713 self.dirstate.forget([f])
713
714
714 def remove(self, list, unlink=False, wlock=None):
715 def remove(self, list, unlink=False, wlock=None):
715 if unlink:
716 if unlink:
716 for f in list:
717 for f in list:
717 try:
718 try:
718 util.unlink(self.wjoin(f))
719 util.unlink(self.wjoin(f))
719 except OSError, inst:
720 except OSError, inst:
720 if inst.errno != errno.ENOENT:
721 if inst.errno != errno.ENOENT:
721 raise
722 raise
722 if not wlock:
723 if not wlock:
723 wlock = self.wlock()
724 wlock = self.wlock()
724 for f in list:
725 for f in list:
725 p = self.wjoin(f)
726 p = self.wjoin(f)
726 if os.path.exists(p):
727 if os.path.exists(p):
727 self.ui.warn(_("%s still exists!\n") % f)
728 self.ui.warn(_("%s still exists!\n") % f)
728 elif self.dirstate.state(f) == 'a':
729 elif self.dirstate.state(f) == 'a':
729 self.dirstate.forget([f])
730 self.dirstate.forget([f])
730 elif f not in self.dirstate:
731 elif f not in self.dirstate:
731 self.ui.warn(_("%s not tracked!\n") % f)
732 self.ui.warn(_("%s not tracked!\n") % f)
732 else:
733 else:
733 self.dirstate.update([f], "r")
734 self.dirstate.update([f], "r")
734
735
735 def undelete(self, list, wlock=None):
736 def undelete(self, list, wlock=None):
736 p = self.dirstate.parents()[0]
737 p = self.dirstate.parents()[0]
737 mn = self.changelog.read(p)[0]
738 mn = self.changelog.read(p)[0]
738 mf = self.manifest.readflags(mn)
739 mf = self.manifest.readflags(mn)
739 m = self.manifest.read(mn)
740 m = self.manifest.read(mn)
740 if not wlock:
741 if not wlock:
741 wlock = self.wlock()
742 wlock = self.wlock()
742 for f in list:
743 for f in list:
743 if self.dirstate.state(f) not in "r":
744 if self.dirstate.state(f) not in "r":
744 self.ui.warn("%s not removed!\n" % f)
745 self.ui.warn("%s not removed!\n" % f)
745 else:
746 else:
746 t = self.file(f).read(m[f])
747 t = self.file(f).read(m[f])
747 self.wwrite(f, t)
748 self.wwrite(f, t)
748 util.set_exec(self.wjoin(f), mf[f])
749 util.set_exec(self.wjoin(f), mf[f])
749 self.dirstate.update([f], "n")
750 self.dirstate.update([f], "n")
750
751
751 def copy(self, source, dest, wlock=None):
752 def copy(self, source, dest, wlock=None):
752 p = self.wjoin(dest)
753 p = self.wjoin(dest)
753 if not os.path.exists(p):
754 if not os.path.exists(p):
754 self.ui.warn(_("%s does not exist!\n") % dest)
755 self.ui.warn(_("%s does not exist!\n") % dest)
755 elif not os.path.isfile(p):
756 elif not os.path.isfile(p):
756 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
757 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
757 else:
758 else:
758 if not wlock:
759 if not wlock:
759 wlock = self.wlock()
760 wlock = self.wlock()
760 if self.dirstate.state(dest) == '?':
761 if self.dirstate.state(dest) == '?':
761 self.dirstate.update([dest], "a")
762 self.dirstate.update([dest], "a")
762 self.dirstate.copy(source, dest)
763 self.dirstate.copy(source, dest)
763
764
764 def heads(self, start=None):
765 def heads(self, start=None):
765 heads = self.changelog.heads(start)
766 heads = self.changelog.heads(start)
766 # sort the output in rev descending order
767 # sort the output in rev descending order
767 heads = [(-self.changelog.rev(h), h) for h in heads]
768 heads = [(-self.changelog.rev(h), h) for h in heads]
768 heads.sort()
769 heads.sort()
769 return [n for (r, n) in heads]
770 return [n for (r, n) in heads]
770
771
771 # branchlookup returns a dict giving a list of branches for
772 # branchlookup returns a dict giving a list of branches for
772 # each head. A branch is defined as the tag of a node or
773 # each head. A branch is defined as the tag of a node or
773 # the branch of the node's parents. If a node has multiple
774 # the branch of the node's parents. If a node has multiple
774 # branch tags, tags are eliminated if they are visible from other
775 # branch tags, tags are eliminated if they are visible from other
775 # branch tags.
776 # branch tags.
776 #
777 #
777 # So, for this graph: a->b->c->d->e
778 # So, for this graph: a->b->c->d->e
778 # \ /
779 # \ /
779 # aa -----/
780 # aa -----/
780 # a has tag 2.6.12
781 # a has tag 2.6.12
781 # d has tag 2.6.13
782 # d has tag 2.6.13
782 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
783 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
783 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
784 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
784 # from the list.
785 # from the list.
785 #
786 #
786 # It is possible that more than one head will have the same branch tag.
787 # It is possible that more than one head will have the same branch tag.
787 # callers need to check the result for multiple heads under the same
788 # callers need to check the result for multiple heads under the same
788 # branch tag if that is a problem for them (ie checkout of a specific
789 # branch tag if that is a problem for them (ie checkout of a specific
789 # branch).
790 # branch).
790 #
791 #
791 # passing in a specific branch will limit the depth of the search
792 # passing in a specific branch will limit the depth of the search
792 # through the parents. It won't limit the branches returned in the
793 # through the parents. It won't limit the branches returned in the
793 # result though.
794 # result though.
794 def branchlookup(self, heads=None, branch=None):
795 def branchlookup(self, heads=None, branch=None):
795 if not heads:
796 if not heads:
796 heads = self.heads()
797 heads = self.heads()
797 headt = [ h for h in heads ]
798 headt = [ h for h in heads ]
798 chlog = self.changelog
799 chlog = self.changelog
799 branches = {}
800 branches = {}
800 merges = []
801 merges = []
801 seenmerge = {}
802 seenmerge = {}
802
803
803 # traverse the tree once for each head, recording in the branches
804 # traverse the tree once for each head, recording in the branches
804 # dict which tags are visible from this head. The branches
805 # dict which tags are visible from this head. The branches
805 # dict also records which tags are visible from each tag
806 # dict also records which tags are visible from each tag
806 # while we traverse.
807 # while we traverse.
807 while headt or merges:
808 while headt or merges:
808 if merges:
809 if merges:
809 n, found = merges.pop()
810 n, found = merges.pop()
810 visit = [n]
811 visit = [n]
811 else:
812 else:
812 h = headt.pop()
813 h = headt.pop()
813 visit = [h]
814 visit = [h]
814 found = [h]
815 found = [h]
815 seen = {}
816 seen = {}
816 while visit:
817 while visit:
817 n = visit.pop()
818 n = visit.pop()
818 if n in seen:
819 if n in seen:
819 continue
820 continue
820 pp = chlog.parents(n)
821 pp = chlog.parents(n)
821 tags = self.nodetags(n)
822 tags = self.nodetags(n)
822 if tags:
823 if tags:
823 for x in tags:
824 for x in tags:
824 if x == 'tip':
825 if x == 'tip':
825 continue
826 continue
826 for f in found:
827 for f in found:
827 branches.setdefault(f, {})[n] = 1
828 branches.setdefault(f, {})[n] = 1
828 branches.setdefault(n, {})[n] = 1
829 branches.setdefault(n, {})[n] = 1
829 break
830 break
830 if n not in found:
831 if n not in found:
831 found.append(n)
832 found.append(n)
832 if branch in tags:
833 if branch in tags:
833 continue
834 continue
834 seen[n] = 1
835 seen[n] = 1
835 if pp[1] != nullid and n not in seenmerge:
836 if pp[1] != nullid and n not in seenmerge:
836 merges.append((pp[1], [x for x in found]))
837 merges.append((pp[1], [x for x in found]))
837 seenmerge[n] = 1
838 seenmerge[n] = 1
838 if pp[0] != nullid:
839 if pp[0] != nullid:
839 visit.append(pp[0])
840 visit.append(pp[0])
840 # traverse the branches dict, eliminating branch tags from each
841 # traverse the branches dict, eliminating branch tags from each
841 # head that are visible from another branch tag for that head.
842 # head that are visible from another branch tag for that head.
842 out = {}
843 out = {}
843 viscache = {}
844 viscache = {}
844 for h in heads:
845 for h in heads:
845 def visible(node):
846 def visible(node):
846 if node in viscache:
847 if node in viscache:
847 return viscache[node]
848 return viscache[node]
848 ret = {}
849 ret = {}
849 visit = [node]
850 visit = [node]
850 while visit:
851 while visit:
851 x = visit.pop()
852 x = visit.pop()
852 if x in viscache:
853 if x in viscache:
853 ret.update(viscache[x])
854 ret.update(viscache[x])
854 elif x not in ret:
855 elif x not in ret:
855 ret[x] = 1
856 ret[x] = 1
856 if x in branches:
857 if x in branches:
857 visit[len(visit):] = branches[x].keys()
858 visit[len(visit):] = branches[x].keys()
858 viscache[node] = ret
859 viscache[node] = ret
859 return ret
860 return ret
860 if h not in branches:
861 if h not in branches:
861 continue
862 continue
862 # O(n^2), but somewhat limited. This only searches the
863 # O(n^2), but somewhat limited. This only searches the
863 # tags visible from a specific head, not all the tags in the
864 # tags visible from a specific head, not all the tags in the
864 # whole repo.
865 # whole repo.
865 for b in branches[h]:
866 for b in branches[h]:
866 vis = False
867 vis = False
867 for bb in branches[h].keys():
868 for bb in branches[h].keys():
868 if b != bb:
869 if b != bb:
869 if b in visible(bb):
870 if b in visible(bb):
870 vis = True
871 vis = True
871 break
872 break
872 if not vis:
873 if not vis:
873 l = out.setdefault(h, [])
874 l = out.setdefault(h, [])
874 l[len(l):] = self.nodetags(b)
875 l[len(l):] = self.nodetags(b)
875 return out
876 return out
876
877
877 def branches(self, nodes):
878 def branches(self, nodes):
878 if not nodes:
879 if not nodes:
879 nodes = [self.changelog.tip()]
880 nodes = [self.changelog.tip()]
880 b = []
881 b = []
881 for n in nodes:
882 for n in nodes:
882 t = n
883 t = n
883 while 1:
884 while 1:
884 p = self.changelog.parents(n)
885 p = self.changelog.parents(n)
885 if p[1] != nullid or p[0] == nullid:
886 if p[1] != nullid or p[0] == nullid:
886 b.append((t, n, p[0], p[1]))
887 b.append((t, n, p[0], p[1]))
887 break
888 break
888 n = p[0]
889 n = p[0]
889 return b
890 return b
890
891
891 def between(self, pairs):
892 def between(self, pairs):
892 r = []
893 r = []
893
894
894 for top, bottom in pairs:
895 for top, bottom in pairs:
895 n, l, i = top, [], 0
896 n, l, i = top, [], 0
896 f = 1
897 f = 1
897
898
898 while n != bottom:
899 while n != bottom:
899 p = self.changelog.parents(n)[0]
900 p = self.changelog.parents(n)[0]
900 if i == f:
901 if i == f:
901 l.append(n)
902 l.append(n)
902 f = f * 2
903 f = f * 2
903 n = p
904 n = p
904 i += 1
905 i += 1
905
906
906 r.append(l)
907 r.append(l)
907
908
908 return r
909 return r
909
910
910 def findincoming(self, remote, base=None, heads=None, force=False):
911 def findincoming(self, remote, base=None, heads=None, force=False):
911 """Return list of roots of the subsets of missing nodes from remote
912 """Return list of roots of the subsets of missing nodes from remote
912
913
913 If base dict is specified, assume that these nodes and their parents
914 If base dict is specified, assume that these nodes and their parents
914 exist on the remote side and that no child of a node of base exists
915 exist on the remote side and that no child of a node of base exists
915 in both remote and self.
916 in both remote and self.
916 Furthermore base will be updated to include the nodes that exists
917 Furthermore base will be updated to include the nodes that exists
917 in self and remote but no children exists in self and remote.
918 in self and remote but no children exists in self and remote.
918 If a list of heads is specified, return only nodes which are heads
919 If a list of heads is specified, return only nodes which are heads
919 or ancestors of these heads.
920 or ancestors of these heads.
920
921
921 All the ancestors of base are in self and in remote.
922 All the ancestors of base are in self and in remote.
922 All the descendants of the list returned are missing in self.
923 All the descendants of the list returned are missing in self.
923 (and so we know that the rest of the nodes are missing in remote, see
924 (and so we know that the rest of the nodes are missing in remote, see
924 outgoing)
925 outgoing)
925 """
926 """
926 m = self.changelog.nodemap
927 m = self.changelog.nodemap
927 search = []
928 search = []
928 fetch = {}
929 fetch = {}
929 seen = {}
930 seen = {}
930 seenbranch = {}
931 seenbranch = {}
931 if base == None:
932 if base == None:
932 base = {}
933 base = {}
933
934
934 if not heads:
935 if not heads:
935 heads = remote.heads()
936 heads = remote.heads()
936
937
937 if self.changelog.tip() == nullid:
938 if self.changelog.tip() == nullid:
938 base[nullid] = 1
939 base[nullid] = 1
939 if heads != [nullid]:
940 if heads != [nullid]:
940 return [nullid]
941 return [nullid]
941 return []
942 return []
942
943
943 # assume we're closer to the tip than the root
944 # assume we're closer to the tip than the root
944 # and start by examining the heads
945 # and start by examining the heads
945 self.ui.status(_("searching for changes\n"))
946 self.ui.status(_("searching for changes\n"))
946
947
947 unknown = []
948 unknown = []
948 for h in heads:
949 for h in heads:
949 if h not in m:
950 if h not in m:
950 unknown.append(h)
951 unknown.append(h)
951 else:
952 else:
952 base[h] = 1
953 base[h] = 1
953
954
954 if not unknown:
955 if not unknown:
955 return []
956 return []
956
957
957 req = dict.fromkeys(unknown)
958 req = dict.fromkeys(unknown)
958 reqcnt = 0
959 reqcnt = 0
959
960
960 # search through remote branches
961 # search through remote branches
961 # a 'branch' here is a linear segment of history, with four parts:
962 # a 'branch' here is a linear segment of history, with four parts:
962 # head, root, first parent, second parent
963 # head, root, first parent, second parent
963 # (a branch always has two parents (or none) by definition)
964 # (a branch always has two parents (or none) by definition)
964 unknown = remote.branches(unknown)
965 unknown = remote.branches(unknown)
965 while unknown:
966 while unknown:
966 r = []
967 r = []
967 while unknown:
968 while unknown:
968 n = unknown.pop(0)
969 n = unknown.pop(0)
969 if n[0] in seen:
970 if n[0] in seen:
970 continue
971 continue
971
972
972 self.ui.debug(_("examining %s:%s\n")
973 self.ui.debug(_("examining %s:%s\n")
973 % (short(n[0]), short(n[1])))
974 % (short(n[0]), short(n[1])))
974 if n[0] == nullid: # found the end of the branch
975 if n[0] == nullid: # found the end of the branch
975 pass
976 pass
976 elif n in seenbranch:
977 elif n in seenbranch:
977 self.ui.debug(_("branch already found\n"))
978 self.ui.debug(_("branch already found\n"))
978 continue
979 continue
979 elif n[1] and n[1] in m: # do we know the base?
980 elif n[1] and n[1] in m: # do we know the base?
980 self.ui.debug(_("found incomplete branch %s:%s\n")
981 self.ui.debug(_("found incomplete branch %s:%s\n")
981 % (short(n[0]), short(n[1])))
982 % (short(n[0]), short(n[1])))
982 search.append(n) # schedule branch range for scanning
983 search.append(n) # schedule branch range for scanning
983 seenbranch[n] = 1
984 seenbranch[n] = 1
984 else:
985 else:
985 if n[1] not in seen and n[1] not in fetch:
986 if n[1] not in seen and n[1] not in fetch:
986 if n[2] in m and n[3] in m:
987 if n[2] in m and n[3] in m:
987 self.ui.debug(_("found new changeset %s\n") %
988 self.ui.debug(_("found new changeset %s\n") %
988 short(n[1]))
989 short(n[1]))
989 fetch[n[1]] = 1 # earliest unknown
990 fetch[n[1]] = 1 # earliest unknown
990 for p in n[2:4]:
991 for p in n[2:4]:
991 if p in m:
992 if p in m:
992 base[p] = 1 # latest known
993 base[p] = 1 # latest known
993
994
994 for p in n[2:4]:
995 for p in n[2:4]:
995 if p not in req and p not in m:
996 if p not in req and p not in m:
996 r.append(p)
997 r.append(p)
997 req[p] = 1
998 req[p] = 1
998 seen[n[0]] = 1
999 seen[n[0]] = 1
999
1000
1000 if r:
1001 if r:
1001 reqcnt += 1
1002 reqcnt += 1
1002 self.ui.debug(_("request %d: %s\n") %
1003 self.ui.debug(_("request %d: %s\n") %
1003 (reqcnt, " ".join(map(short, r))))
1004 (reqcnt, " ".join(map(short, r))))
1004 for p in range(0, len(r), 10):
1005 for p in range(0, len(r), 10):
1005 for b in remote.branches(r[p:p+10]):
1006 for b in remote.branches(r[p:p+10]):
1006 self.ui.debug(_("received %s:%s\n") %
1007 self.ui.debug(_("received %s:%s\n") %
1007 (short(b[0]), short(b[1])))
1008 (short(b[0]), short(b[1])))
1008 unknown.append(b)
1009 unknown.append(b)
1009
1010
1010 # do binary search on the branches we found
1011 # do binary search on the branches we found
1011 while search:
1012 while search:
1012 n = search.pop(0)
1013 n = search.pop(0)
1013 reqcnt += 1
1014 reqcnt += 1
1014 l = remote.between([(n[0], n[1])])[0]
1015 l = remote.between([(n[0], n[1])])[0]
1015 l.append(n[1])
1016 l.append(n[1])
1016 p = n[0]
1017 p = n[0]
1017 f = 1
1018 f = 1
1018 for i in l:
1019 for i in l:
1019 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1020 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1020 if i in m:
1021 if i in m:
1021 if f <= 2:
1022 if f <= 2:
1022 self.ui.debug(_("found new branch changeset %s\n") %
1023 self.ui.debug(_("found new branch changeset %s\n") %
1023 short(p))
1024 short(p))
1024 fetch[p] = 1
1025 fetch[p] = 1
1025 base[i] = 1
1026 base[i] = 1
1026 else:
1027 else:
1027 self.ui.debug(_("narrowed branch search to %s:%s\n")
1028 self.ui.debug(_("narrowed branch search to %s:%s\n")
1028 % (short(p), short(i)))
1029 % (short(p), short(i)))
1029 search.append((p, i))
1030 search.append((p, i))
1030 break
1031 break
1031 p, f = i, f * 2
1032 p, f = i, f * 2
1032
1033
1033 # sanity check our fetch list
1034 # sanity check our fetch list
1034 for f in fetch.keys():
1035 for f in fetch.keys():
1035 if f in m:
1036 if f in m:
1036 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1037 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1037
1038
1038 if base.keys() == [nullid]:
1039 if base.keys() == [nullid]:
1039 if force:
1040 if force:
1040 self.ui.warn(_("warning: repository is unrelated\n"))
1041 self.ui.warn(_("warning: repository is unrelated\n"))
1041 else:
1042 else:
1042 raise util.Abort(_("repository is unrelated"))
1043 raise util.Abort(_("repository is unrelated"))
1043
1044
1044 self.ui.note(_("found new changesets starting at ") +
1045 self.ui.note(_("found new changesets starting at ") +
1045 " ".join([short(f) for f in fetch]) + "\n")
1046 " ".join([short(f) for f in fetch]) + "\n")
1046
1047
1047 self.ui.debug(_("%d total queries\n") % reqcnt)
1048 self.ui.debug(_("%d total queries\n") % reqcnt)
1048
1049
1049 return fetch.keys()
1050 return fetch.keys()
1050
1051
1051 def findoutgoing(self, remote, base=None, heads=None, force=False):
1052 def findoutgoing(self, remote, base=None, heads=None, force=False):
1052 """Return list of nodes that are roots of subsets not in remote
1053 """Return list of nodes that are roots of subsets not in remote
1053
1054
1054 If base dict is specified, assume that these nodes and their parents
1055 If base dict is specified, assume that these nodes and their parents
1055 exist on the remote side.
1056 exist on the remote side.
1056 If a list of heads is specified, return only nodes which are heads
1057 If a list of heads is specified, return only nodes which are heads
1057 or ancestors of these heads, and return a second element which
1058 or ancestors of these heads, and return a second element which
1058 contains all remote heads which get new children.
1059 contains all remote heads which get new children.
1059 """
1060 """
1060 if base == None:
1061 if base == None:
1061 base = {}
1062 base = {}
1062 self.findincoming(remote, base, heads, force=force)
1063 self.findincoming(remote, base, heads, force=force)
1063
1064
1064 self.ui.debug(_("common changesets up to ")
1065 self.ui.debug(_("common changesets up to ")
1065 + " ".join(map(short, base.keys())) + "\n")
1066 + " ".join(map(short, base.keys())) + "\n")
1066
1067
1067 remain = dict.fromkeys(self.changelog.nodemap)
1068 remain = dict.fromkeys(self.changelog.nodemap)
1068
1069
1069 # prune everything remote has from the tree
1070 # prune everything remote has from the tree
1070 del remain[nullid]
1071 del remain[nullid]
1071 remove = base.keys()
1072 remove = base.keys()
1072 while remove:
1073 while remove:
1073 n = remove.pop(0)
1074 n = remove.pop(0)
1074 if n in remain:
1075 if n in remain:
1075 del remain[n]
1076 del remain[n]
1076 for p in self.changelog.parents(n):
1077 for p in self.changelog.parents(n):
1077 remove.append(p)
1078 remove.append(p)
1078
1079
1079 # find every node whose parents have been pruned
1080 # find every node whose parents have been pruned
1080 subset = []
1081 subset = []
1081 # find every remote head that will get new children
1082 # find every remote head that will get new children
1082 updated_heads = {}
1083 updated_heads = {}
1083 for n in remain:
1084 for n in remain:
1084 p1, p2 = self.changelog.parents(n)
1085 p1, p2 = self.changelog.parents(n)
1085 if p1 not in remain and p2 not in remain:
1086 if p1 not in remain and p2 not in remain:
1086 subset.append(n)
1087 subset.append(n)
1087 if heads:
1088 if heads:
1088 if p1 in heads:
1089 if p1 in heads:
1089 updated_heads[p1] = True
1090 updated_heads[p1] = True
1090 if p2 in heads:
1091 if p2 in heads:
1091 updated_heads[p2] = True
1092 updated_heads[p2] = True
1092
1093
1093 # this is the set of all roots we have to push
1094 # this is the set of all roots we have to push
1094 if heads:
1095 if heads:
1095 return subset, updated_heads.keys()
1096 return subset, updated_heads.keys()
1096 else:
1097 else:
1097 return subset
1098 return subset
1098
1099
1099 def pull(self, remote, heads=None, force=False):
1100 def pull(self, remote, heads=None, force=False):
1100 l = self.lock()
1101 l = self.lock()
1101
1102
1102 fetch = self.findincoming(remote, force=force)
1103 fetch = self.findincoming(remote, force=force)
1103 if fetch == [nullid]:
1104 if fetch == [nullid]:
1104 self.ui.status(_("requesting all changes\n"))
1105 self.ui.status(_("requesting all changes\n"))
1105
1106
1106 if not fetch:
1107 if not fetch:
1107 self.ui.status(_("no changes found\n"))
1108 self.ui.status(_("no changes found\n"))
1108 return 0
1109 return 0
1109
1110
1110 if heads is None:
1111 if heads is None:
1111 cg = remote.changegroup(fetch, 'pull')
1112 cg = remote.changegroup(fetch, 'pull')
1112 else:
1113 else:
1113 cg = remote.changegroupsubset(fetch, heads, 'pull')
1114 cg = remote.changegroupsubset(fetch, heads, 'pull')
1114 return self.addchangegroup(cg, 'pull')
1115 return self.addchangegroup(cg, 'pull')
1115
1116
1116 def push(self, remote, force=False, revs=None):
1117 def push(self, remote, force=False, revs=None):
1117 # there are two ways to push to remote repo:
1118 # there are two ways to push to remote repo:
1118 #
1119 #
1119 # addchangegroup assumes local user can lock remote
1120 # addchangegroup assumes local user can lock remote
1120 # repo (local filesystem, old ssh servers).
1121 # repo (local filesystem, old ssh servers).
1121 #
1122 #
1122 # unbundle assumes local user cannot lock remote repo (new ssh
1123 # unbundle assumes local user cannot lock remote repo (new ssh
1123 # servers, http servers).
1124 # servers, http servers).
1124
1125
1125 if 'unbundle' in remote.capabilities:
1126 if 'unbundle' in remote.capabilities:
1126 return self.push_unbundle(remote, force, revs)
1127 return self.push_unbundle(remote, force, revs)
1127 return self.push_addchangegroup(remote, force, revs)
1128 return self.push_addchangegroup(remote, force, revs)
1128
1129
1129 def prepush(self, remote, force, revs):
1130 def prepush(self, remote, force, revs):
1130 base = {}
1131 base = {}
1131 remote_heads = remote.heads()
1132 remote_heads = remote.heads()
1132 inc = self.findincoming(remote, base, remote_heads, force=force)
1133 inc = self.findincoming(remote, base, remote_heads, force=force)
1133 if not force and inc:
1134 if not force and inc:
1134 self.ui.warn(_("abort: unsynced remote changes!\n"))
1135 self.ui.warn(_("abort: unsynced remote changes!\n"))
1135 self.ui.status(_("(did you forget to sync?"
1136 self.ui.status(_("(did you forget to sync?"
1136 " use push -f to force)\n"))
1137 " use push -f to force)\n"))
1137 return None, 1
1138 return None, 1
1138
1139
1139 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1140 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1140 if revs is not None:
1141 if revs is not None:
1141 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1142 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1142 else:
1143 else:
1143 bases, heads = update, self.changelog.heads()
1144 bases, heads = update, self.changelog.heads()
1144
1145
1145 if not bases:
1146 if not bases:
1146 self.ui.status(_("no changes found\n"))
1147 self.ui.status(_("no changes found\n"))
1147 return None, 1
1148 return None, 1
1148 elif not force:
1149 elif not force:
1149 # FIXME we don't properly detect creation of new heads
1150 # FIXME we don't properly detect creation of new heads
1150 # in the push -r case, assume the user knows what he's doing
1151 # in the push -r case, assume the user knows what he's doing
1151 if not revs and len(remote_heads) < len(heads) \
1152 if not revs and len(remote_heads) < len(heads) \
1152 and remote_heads != [nullid]:
1153 and remote_heads != [nullid]:
1153 self.ui.warn(_("abort: push creates new remote branches!\n"))
1154 self.ui.warn(_("abort: push creates new remote branches!\n"))
1154 self.ui.status(_("(did you forget to merge?"
1155 self.ui.status(_("(did you forget to merge?"
1155 " use push -f to force)\n"))
1156 " use push -f to force)\n"))
1156 return None, 1
1157 return None, 1
1157
1158
1158 if revs is None:
1159 if revs is None:
1159 cg = self.changegroup(update, 'push')
1160 cg = self.changegroup(update, 'push')
1160 else:
1161 else:
1161 cg = self.changegroupsubset(update, revs, 'push')
1162 cg = self.changegroupsubset(update, revs, 'push')
1162 return cg, remote_heads
1163 return cg, remote_heads
1163
1164
1164 def push_addchangegroup(self, remote, force, revs):
1165 def push_addchangegroup(self, remote, force, revs):
1165 lock = remote.lock()
1166 lock = remote.lock()
1166
1167
1167 ret = self.prepush(remote, force, revs)
1168 ret = self.prepush(remote, force, revs)
1168 if ret[0] is not None:
1169 if ret[0] is not None:
1169 cg, remote_heads = ret
1170 cg, remote_heads = ret
1170 return remote.addchangegroup(cg, 'push')
1171 return remote.addchangegroup(cg, 'push')
1171 return ret[1]
1172 return ret[1]
1172
1173
1173 def push_unbundle(self, remote, force, revs):
1174 def push_unbundle(self, remote, force, revs):
1174 # local repo finds heads on server, finds out what revs it
1175 # local repo finds heads on server, finds out what revs it
1175 # must push. once revs transferred, if server finds it has
1176 # must push. once revs transferred, if server finds it has
1176 # different heads (someone else won commit/push race), server
1177 # different heads (someone else won commit/push race), server
1177 # aborts.
1178 # aborts.
1178
1179
1179 ret = self.prepush(remote, force, revs)
1180 ret = self.prepush(remote, force, revs)
1180 if ret[0] is not None:
1181 if ret[0] is not None:
1181 cg, remote_heads = ret
1182 cg, remote_heads = ret
1182 if force: remote_heads = ['force']
1183 if force: remote_heads = ['force']
1183 return remote.unbundle(cg, remote_heads, 'push')
1184 return remote.unbundle(cg, remote_heads, 'push')
1184 return ret[1]
1185 return ret[1]
1185
1186
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases, heads - lists of changelog nodes delimiting the group.
        source - tag passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer over the generated group stream."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1456
1457
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes - changelog nodes whose descendants should be sent.
        source - tag passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer over the generated group stream."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All descendants of basenodes are outgoing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of outgoing changelog revision numbers; used below to
        # select manifest/file revisions via their linkrevs.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # A changeset owns itself, so the changenode lookup is identity.
        def identity(x):
            return x

        # Yield the nodes of revlog whose linked changeset is outgoing.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Callback factory: records, into changedfileset, every file touched
        # by each outgoing changeset as it is streamed.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Callback factory: map a node of revlog to its owning changelog node.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # Stream the changelog group, then the manifest group, then one
        # group per changed file, then the closing chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so the emptiness test doesn't consume it
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1522
1523
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.

        returns number of heads modified or added + 1.

        source is a chunked stream as consumed by changegroup.chunkiter /
        changegroup.getchunk; srctype is forwarded verbatim to the
        prechangegroup / pretxnchangegroup / changegroup / incoming hooks
        as their "source" argument.
        """

        # Linkrev mapper passed to cl.addgroup below: each incoming
        # changeset links to itself, i.e. the next index the changelog
        # will assign (cl.count() at the time the chunk is applied).
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Linkrev mapper for manifest and filelog groups: translate the
        # changeset node recorded in each chunk into a changelog revision.
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog rev before/after the pull; the new
            # changesets are cor+1 .. cnr (used for the hooks below).
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            # the file section is a sequence of (filename chunk, revision
            # group) pairs, terminated by an empty chunk
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # publish the buffered changelog data (see appendfile) so the
            # new changesets become visible atomically
            cl.writedata()
        finally:
            # always tear down the appendfile wrapper, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction (throw=True
            # and tr.close() has not run yet)
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            # post-transaction notification hooks: one 'changegroup' for the
            # whole group, one 'incoming' per new changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1617
1618
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """update the working directory to changeset *node*, merging if needed.

        allow -- permit a branch merge; without it a non-linear update
            aborts with instructions (and with it a linear update aborts,
            since there would be nothing to merge)
        force -- clobber local changes / files instead of merging
        choose -- optional predicate limiting which files are considered
        moddirstate -- also update the dirstate to reflect the result
        forcemerge -- skip the uncommitted-changes / divergent-unknown-file
            checks before merging
        wlock -- an already-held working-directory lock to reuse
        show_stats -- print the updated/merged/removed/unresolved summary

        Returns a true value (1 or True) on failure, a false value on
        success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            raise util.Abort(_("outstanding uncommitted merges"))

        err = False

        # p1: current working dir parent, p2: update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        # manifests (and exec-flag maps) of local (1), remote (2) and
        # ancestor (a); m2 is copied because entries are deleted from it
        # as they are handled in the first comparison loop below
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))

        # refuse to overwrite an untracked file whose content differs
        # from the incoming revision
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # actions accumulated by the comparison loops:
        #   merge: file -> (local node, remote node, exec mode) to 3-way merge
        #   get:   file -> remote node to fetch as-is
        #   remove: files to delete from the working dir
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                # s: set once f's content has been scheduled (merge/get);
                # suppresses the exec-bit-only fixup further down
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                # content is settled; reconcile a bare exec-bit difference
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same three-way exec-bit merge as above
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled: drop from m2 so the second loop only sees files
                # absent from the working dir
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # second pass: files only present on the remote side (everything
        # handled above was deleted from m2)
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote version of everything that
            # would have needed a merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # hex parents for the hooks; an empty string stands for a null p2
        xp1 = hex(p1)
        xp2 = hex(p2)
        if p2 == nullid: xxp2 = ''
        else: xxp2 = xp2

        self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; warn about anything else
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     "  hg update -C %s\n"
                                     "  hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
        return err
1916
1917
1917 def merge3(self, fn, my, other, p1, p2):
1918 def merge3(self, fn, my, other, p1, p2):
1918 """perform a 3-way merge in the working directory"""
1919 """perform a 3-way merge in the working directory"""
1919
1920
1920 def temp(prefix, node):
1921 def temp(prefix, node):
1921 pre = "%s~%s." % (os.path.basename(fn), prefix)
1922 pre = "%s~%s." % (os.path.basename(fn), prefix)
1922 (fd, name) = tempfile.mkstemp(prefix=pre)
1923 (fd, name) = tempfile.mkstemp(prefix=pre)
1923 f = os.fdopen(fd, "wb")
1924 f = os.fdopen(fd, "wb")
1924 self.wwrite(fn, fl.read(node), f)
1925 self.wwrite(fn, fl.read(node), f)
1925 f.close()
1926 f.close()
1926 return name
1927 return name
1927
1928
1928 fl = self.file(fn)
1929 fl = self.file(fn)
1929 base = fl.ancestor(my, other)
1930 base = fl.ancestor(my, other)
1930 a = self.wjoin(fn)
1931 a = self.wjoin(fn)
1931 b = temp("base", base)
1932 b = temp("base", base)
1932 c = temp("other", other)
1933 c = temp("other", other)
1933
1934
1934 self.ui.note(_("resolving %s\n") % fn)
1935 self.ui.note(_("resolving %s\n") % fn)
1935 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1936 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1936 (fn, short(my), short(other), short(base)))
1937 (fn, short(my), short(other), short(base)))
1937
1938
1938 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1939 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1939 or "hgmerge")
1940 or "hgmerge")
1940 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1941 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1941 environ={'HG_FILE': fn,
1942 environ={'HG_FILE': fn,
1942 'HG_MY_NODE': p1,
1943 'HG_MY_NODE': p1,
1943 'HG_OTHER_NODE': p2,
1944 'HG_OTHER_NODE': p2,
1944 'HG_FILE_MY_NODE': hex(my),
1945 'HG_FILE_MY_NODE': hex(my),
1945 'HG_FILE_OTHER_NODE': hex(other),
1946 'HG_FILE_OTHER_NODE': hex(other),
1946 'HG_FILE_BASE_NODE': hex(base)})
1947 'HG_FILE_BASE_NODE': hex(base)})
1947 if r:
1948 if r:
1948 self.ui.warn(_("merging %s failed!\n") % fn)
1949 self.ui.warn(_("merging %s failed!\n") % fn)
1949
1950
1950 os.unlink(b)
1951 os.unlink(b)
1951 os.unlink(c)
1952 os.unlink(c)
1952 return r
1953 return r
1953
1954
1954 def verify(self):
1955 def verify(self):
1955 filelinkrevs = {}
1956 filelinkrevs = {}
1956 filenodes = {}
1957 filenodes = {}
1957 changesets = revisions = files = 0
1958 changesets = revisions = files = 0
1958 errors = [0]
1959 errors = [0]
1959 warnings = [0]
1960 warnings = [0]
1960 neededmanifests = {}
1961 neededmanifests = {}
1961
1962
1962 def err(msg):
1963 def err(msg):
1963 self.ui.warn(msg + "\n")
1964 self.ui.warn(msg + "\n")
1964 errors[0] += 1
1965 errors[0] += 1
1965
1966
1966 def warn(msg):
1967 def warn(msg):
1967 self.ui.warn(msg + "\n")
1968 self.ui.warn(msg + "\n")
1968 warnings[0] += 1
1969 warnings[0] += 1
1969
1970
1970 def checksize(obj, name):
1971 def checksize(obj, name):
1971 d = obj.checksize()
1972 d = obj.checksize()
1972 if d[0]:
1973 if d[0]:
1973 err(_("%s data length off by %d bytes") % (name, d[0]))
1974 err(_("%s data length off by %d bytes") % (name, d[0]))
1974 if d[1]:
1975 if d[1]:
1975 err(_("%s index contains %d extra bytes") % (name, d[1]))
1976 err(_("%s index contains %d extra bytes") % (name, d[1]))
1976
1977
1977 def checkversion(obj, name):
1978 def checkversion(obj, name):
1978 if obj.version != revlog.REVLOGV0:
1979 if obj.version != revlog.REVLOGV0:
1979 if not revlogv1:
1980 if not revlogv1:
1980 warn(_("warning: `%s' uses revlog format 1") % name)
1981 warn(_("warning: `%s' uses revlog format 1") % name)
1981 elif revlogv1:
1982 elif revlogv1:
1982 warn(_("warning: `%s' uses revlog format 0") % name)
1983 warn(_("warning: `%s' uses revlog format 0") % name)
1983
1984
1984 revlogv1 = self.revlogversion != revlog.REVLOGV0
1985 revlogv1 = self.revlogversion != revlog.REVLOGV0
1985 if self.ui.verbose or revlogv1 != self.revlogv1:
1986 if self.ui.verbose or revlogv1 != self.revlogv1:
1986 self.ui.status(_("repository uses revlog format %d\n") %
1987 self.ui.status(_("repository uses revlog format %d\n") %
1987 (revlogv1 and 1 or 0))
1988 (revlogv1 and 1 or 0))
1988
1989
1989 seen = {}
1990 seen = {}
1990 self.ui.status(_("checking changesets\n"))
1991 self.ui.status(_("checking changesets\n"))
1991 checksize(self.changelog, "changelog")
1992 checksize(self.changelog, "changelog")
1992
1993
1993 for i in range(self.changelog.count()):
1994 for i in range(self.changelog.count()):
1994 changesets += 1
1995 changesets += 1
1995 n = self.changelog.node(i)
1996 n = self.changelog.node(i)
1996 l = self.changelog.linkrev(n)
1997 l = self.changelog.linkrev(n)
1997 if l != i:
1998 if l != i:
1998 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1999 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1999 if n in seen:
2000 if n in seen:
2000 err(_("duplicate changeset at revision %d") % i)
2001 err(_("duplicate changeset at revision %d") % i)
2001 seen[n] = 1
2002 seen[n] = 1
2002
2003
2003 for p in self.changelog.parents(n):
2004 for p in self.changelog.parents(n):
2004 if p not in self.changelog.nodemap:
2005 if p not in self.changelog.nodemap:
2005 err(_("changeset %s has unknown parent %s") %
2006 err(_("changeset %s has unknown parent %s") %
2006 (short(n), short(p)))
2007 (short(n), short(p)))
2007 try:
2008 try:
2008 changes = self.changelog.read(n)
2009 changes = self.changelog.read(n)
2009 except KeyboardInterrupt:
2010 except KeyboardInterrupt:
2010 self.ui.warn(_("interrupted"))
2011 self.ui.warn(_("interrupted"))
2011 raise
2012 raise
2012 except Exception, inst:
2013 except Exception, inst:
2013 err(_("unpacking changeset %s: %s") % (short(n), inst))
2014 err(_("unpacking changeset %s: %s") % (short(n), inst))
2014 continue
2015 continue
2015
2016
2016 neededmanifests[changes[0]] = n
2017 neededmanifests[changes[0]] = n
2017
2018
2018 for f in changes[3]:
2019 for f in changes[3]:
2019 filelinkrevs.setdefault(f, []).append(i)
2020 filelinkrevs.setdefault(f, []).append(i)
2020
2021
2021 seen = {}
2022 seen = {}
2022 self.ui.status(_("checking manifests\n"))
2023 self.ui.status(_("checking manifests\n"))
2023 checkversion(self.manifest, "manifest")
2024 checkversion(self.manifest, "manifest")
2024 checksize(self.manifest, "manifest")
2025 checksize(self.manifest, "manifest")
2025
2026
2026 for i in range(self.manifest.count()):
2027 for i in range(self.manifest.count()):
2027 n = self.manifest.node(i)
2028 n = self.manifest.node(i)
2028 l = self.manifest.linkrev(n)
2029 l = self.manifest.linkrev(n)
2029
2030
2030 if l < 0 or l >= self.changelog.count():
2031 if l < 0 or l >= self.changelog.count():
2031 err(_("bad manifest link (%d) at revision %d") % (l, i))
2032 err(_("bad manifest link (%d) at revision %d") % (l, i))
2032
2033
2033 if n in neededmanifests:
2034 if n in neededmanifests:
2034 del neededmanifests[n]
2035 del neededmanifests[n]
2035
2036
2036 if n in seen:
2037 if n in seen:
2037 err(_("duplicate manifest at revision %d") % i)
2038 err(_("duplicate manifest at revision %d") % i)
2038
2039
2039 seen[n] = 1
2040 seen[n] = 1
2040
2041
2041 for p in self.manifest.parents(n):
2042 for p in self.manifest.parents(n):
2042 if p not in self.manifest.nodemap:
2043 if p not in self.manifest.nodemap:
2043 err(_("manifest %s has unknown parent %s") %
2044 err(_("manifest %s has unknown parent %s") %
2044 (short(n), short(p)))
2045 (short(n), short(p)))
2045
2046
2046 try:
2047 try:
2047 delta = mdiff.patchtext(self.manifest.delta(n))
2048 delta = mdiff.patchtext(self.manifest.delta(n))
2048 except KeyboardInterrupt:
2049 except KeyboardInterrupt:
2049 self.ui.warn(_("interrupted"))
2050 self.ui.warn(_("interrupted"))
2050 raise
2051 raise
2051 except Exception, inst:
2052 except Exception, inst:
2052 err(_("unpacking manifest %s: %s") % (short(n), inst))
2053 err(_("unpacking manifest %s: %s") % (short(n), inst))
2053 continue
2054 continue
2054
2055
2055 try:
2056 try:
2056 ff = [ l.split('\0') for l in delta.splitlines() ]
2057 ff = [ l.split('\0') for l in delta.splitlines() ]
2057 for f, fn in ff:
2058 for f, fn in ff:
2058 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2059 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2059 except (ValueError, TypeError), inst:
2060 except (ValueError, TypeError), inst:
2060 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2061 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2061
2062
2062 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2063 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2063
2064
2064 for m, c in neededmanifests.items():
2065 for m, c in neededmanifests.items():
2065 err(_("Changeset %s refers to unknown manifest %s") %
2066 err(_("Changeset %s refers to unknown manifest %s") %
2066 (short(m), short(c)))
2067 (short(m), short(c)))
2067 del neededmanifests
2068 del neededmanifests
2068
2069
2069 for f in filenodes:
2070 for f in filenodes:
2070 if f not in filelinkrevs:
2071 if f not in filelinkrevs:
2071 err(_("file %s in manifest but not in changesets") % f)
2072 err(_("file %s in manifest but not in changesets") % f)
2072
2073
2073 for f in filelinkrevs:
2074 for f in filelinkrevs:
2074 if f not in filenodes:
2075 if f not in filenodes:
2075 err(_("file %s in changeset but not in manifest") % f)
2076 err(_("file %s in changeset but not in manifest") % f)
2076
2077
2077 self.ui.status(_("checking files\n"))
2078 self.ui.status(_("checking files\n"))
2078 ff = filenodes.keys()
2079 ff = filenodes.keys()
2079 ff.sort()
2080 ff.sort()
2080 for f in ff:
2081 for f in ff:
2081 if f == "/dev/null":
2082 if f == "/dev/null":
2082 continue
2083 continue
2083 files += 1
2084 files += 1
2084 if not f:
2085 if not f:
2085 err(_("file without name in manifest %s") % short(n))
2086 err(_("file without name in manifest %s") % short(n))
2086 continue
2087 continue
2087 fl = self.file(f)
2088 fl = self.file(f)
2088 checkversion(fl, f)
2089 checkversion(fl, f)
2089 checksize(fl, f)
2090 checksize(fl, f)
2090
2091
2091 nodes = {nullid: 1}
2092 nodes = {nullid: 1}
2092 seen = {}
2093 seen = {}
2093 for i in range(fl.count()):
2094 for i in range(fl.count()):
2094 revisions += 1
2095 revisions += 1
2095 n = fl.node(i)
2096 n = fl.node(i)
2096
2097
2097 if n in seen:
2098 if n in seen:
2098 err(_("%s: duplicate revision %d") % (f, i))
2099 err(_("%s: duplicate revision %d") % (f, i))
2099 if n not in filenodes[f]:
2100 if n not in filenodes[f]:
2100 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2101 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2101 else:
2102 else:
2102 del filenodes[f][n]
2103 del filenodes[f][n]
2103
2104
2104 flr = fl.linkrev(n)
2105 flr = fl.linkrev(n)
2105 if flr not in filelinkrevs.get(f, []):
2106 if flr not in filelinkrevs.get(f, []):
2106 err(_("%s:%s points to unexpected changeset %d")
2107 err(_("%s:%s points to unexpected changeset %d")
2107 % (f, short(n), flr))
2108 % (f, short(n), flr))
2108 else:
2109 else:
2109 filelinkrevs[f].remove(flr)
2110 filelinkrevs[f].remove(flr)
2110
2111
2111 # verify contents
2112 # verify contents
2112 try:
2113 try:
2113 t = fl.read(n)
2114 t = fl.read(n)
2114 except KeyboardInterrupt:
2115 except KeyboardInterrupt:
2115 self.ui.warn(_("interrupted"))
2116 self.ui.warn(_("interrupted"))
2116 raise
2117 raise
2117 except Exception, inst:
2118 except Exception, inst:
2118 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2119 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2119
2120
2120 # verify parents
2121 # verify parents
2121 (p1, p2) = fl.parents(n)
2122 (p1, p2) = fl.parents(n)
2122 if p1 not in nodes:
2123 if p1 not in nodes:
2123 err(_("file %s:%s unknown parent 1 %s") %
2124 err(_("file %s:%s unknown parent 1 %s") %
2124 (f, short(n), short(p1)))
2125 (f, short(n), short(p1)))
2125 if p2 not in nodes:
2126 if p2 not in nodes:
2126 err(_("file %s:%s unknown parent 2 %s") %
2127 err(_("file %s:%s unknown parent 2 %s") %
2127 (f, short(n), short(p1)))
2128 (f, short(n), short(p1)))
2128 nodes[n] = 1
2129 nodes[n] = 1
2129
2130
2130 # cross-check
2131 # cross-check
2131 for node in filenodes[f]:
2132 for node in filenodes[f]:
2132 err(_("node %s in manifests not in %s") % (hex(node), f))
2133 err(_("node %s in manifests not in %s") % (hex(node), f))
2133
2134
2134 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2135 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2135 (files, changesets, revisions))
2136 (files, changesets, revisions))
2136
2137
2137 if warnings[0]:
2138 if warnings[0]:
2138 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2139 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2139 if errors[0]:
2140 if errors[0]:
2140 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2141 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2141 return 1
2142 return 1
2142
2143
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that promotes journal files to undo files.

    The closure binds only the path string *base*, never the repository
    object itself, so holding the callback does not create a reference
    cycle that would keep the repo's destructor from running.
    """
    journal_base = base
    def a():
        # Promote both transaction journals to their undo counterparts,
        # in the same order as before: changelog journal first, then
        # the dirstate journal.
        for old_name, new_name in (("journal", "undo"),
                                   ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(journal_base, old_name),
                        os.path.join(journal_base, new_name))
    return a
2151
2152
General Comments 0
You need to be logged in to leave comments. Login now