##// END OF EJS Templates
Clarified message when nothing to merge is seen....
Thomas Arendsen Hein -
r2548:0229ff95 default
parent child Browse files
Show More
@@ -1,2152 +1,2152 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 demandload(globals(), "appendfile changegroup")
11 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "changelog dirstate filelog manifest repo")
12 demandload(globals(), "changelog dirstate filelog manifest repo")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "os revlog util")
14 demandload(globals(), "os revlog util")
15
15
class localrepository(object):
    # Protocol capability strings; empty for the base local repository.
    # NOTE(review): presumably queried by wire-protocol peers — confirm.
    capabilities = ()
18
18
    def __del__(self):
        # drop the reference to any pending transaction handle on teardown
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        When path is None, walk up from the current directory looking
        for a ".hg" directory.  Raises repo.RepoError when no repository
        is found, or when path exists but holds no ".hg" and create is
        not requested.
        """
        if not path:
            # search upward for the enclosing repository root
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository configuration is optional
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # determine the revlog format version and flags to use, from
        # the [revlog] configuration section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not computed yet"
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81
81
    def hook(self, name, throw=False, **args):
        """Run every configured hook matching `name`.

        Each [hooks] entry whose key (before any ".suffix") equals name
        is run in sorted order: "python:mod.func" values as in-process
        callables, anything else as a shell command.  Returns the OR of
        the individual results; with throw=True the first failure raises
        util.Abort instead of just warning.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the attribute path below the top-level module
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: keyword args are exported as HG_* environment
            # variables for the command
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
158
158
159 def tags(self):
159 def tags(self):
160 '''return a mapping of tag to node'''
160 '''return a mapping of tag to node'''
161 if not self.tagscache:
161 if not self.tagscache:
162 self.tagscache = {}
162 self.tagscache = {}
163
163
164 def parsetag(line, context):
164 def parsetag(line, context):
165 if not line:
165 if not line:
166 return
166 return
167 s = l.split(" ", 1)
167 s = l.split(" ", 1)
168 if len(s) != 2:
168 if len(s) != 2:
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 return
170 return
171 node, key = s
171 node, key = s
172 key = key.strip()
172 key = key.strip()
173 try:
173 try:
174 bin_n = bin(node)
174 bin_n = bin(node)
175 except TypeError:
175 except TypeError:
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 (context, node))
177 (context, node))
178 return
178 return
179 if bin_n not in self.changelog.nodemap:
179 if bin_n not in self.changelog.nodemap:
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 (context, key))
181 (context, key))
182 return
182 return
183 self.tagscache[key] = bin_n
183 self.tagscache[key] = bin_n
184
184
185 # read the tags file from each head, ending with the tip,
185 # read the tags file from each head, ending with the tip,
186 # and add each tag found to the map, with "newer" ones
186 # and add each tag found to the map, with "newer" ones
187 # taking precedence
187 # taking precedence
188 heads = self.heads()
188 heads = self.heads()
189 heads.reverse()
189 heads.reverse()
190 fl = self.file(".hgtags")
190 fl = self.file(".hgtags")
191 for node in heads:
191 for node in heads:
192 change = self.changelog.read(node)
192 change = self.changelog.read(node)
193 rev = self.changelog.rev(node)
193 rev = self.changelog.rev(node)
194 fn, ff = self.manifest.find(change[0], '.hgtags')
194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 if fn is None: continue
195 if fn is None: continue
196 count = 0
196 count = 0
197 for l in fl.read(fn).splitlines():
197 for l in fl.read(fn).splitlines():
198 count += 1
198 count += 1
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 (rev, short(node), count))
200 (rev, short(node), count))
201 try:
201 try:
202 f = self.opener("localtags")
202 f = self.opener("localtags")
203 count = 0
203 count = 0
204 for l in f:
204 for l in f:
205 count += 1
205 count += 1
206 parsetag(l, _("localtags, line %d") % count)
206 parsetag(l, _("localtags, line %d") % count)
207 except IOError:
207 except IOError:
208 pass
208 pass
209
209
210 self.tagscache['tip'] = self.changelog.tip()
210 self.tagscache['tip'] = self.changelog.tip()
211
211
212 return self.tagscache
212 return self.tagscache
213
213
214 def tagslist(self):
214 def tagslist(self):
215 '''return a list of tags ordered by revision'''
215 '''return a list of tags ordered by revision'''
216 l = []
216 l = []
217 for t, n in self.tags().items():
217 for t, n in self.tags().items():
218 try:
218 try:
219 r = self.changelog.rev(n)
219 r = self.changelog.rev(n)
220 except:
220 except:
221 r = -2 # sort to the beginning of the list if unknown
221 r = -2 # sort to the beginning of the list if unknown
222 l.append((r, t, n))
222 l.append((r, t, n))
223 l.sort()
223 l.sort()
224 return [(t, n) for r, t, n in l]
224 return [(t, n) for r, t, n in l]
225
225
226 def nodetags(self, node):
226 def nodetags(self, node):
227 '''return the tags associated with a node'''
227 '''return the tags associated with a node'''
228 if not self.nodetagscache:
228 if not self.nodetagscache:
229 self.nodetagscache = {}
229 self.nodetagscache = {}
230 for t, n in self.tags().items():
230 for t, n in self.tags().items():
231 self.nodetagscache.setdefault(n, []).append(t)
231 self.nodetagscache.setdefault(n, []).append(t)
232 return self.nodetagscache.get(node, [])
232 return self.nodetagscache.get(node, [])
233
233
234 def lookup(self, key):
234 def lookup(self, key):
235 try:
235 try:
236 return self.tags()[key]
236 return self.tags()[key]
237 except KeyError:
237 except KeyError:
238 try:
238 try:
239 return self.changelog.lookup(key)
239 return self.changelog.lookup(key)
240 except:
240 except:
241 raise repo.RepoError(_("unknown revision '%s'") % key)
241 raise repo.RepoError(_("unknown revision '%s'") % key)
242
242
243 def dev(self):
243 def dev(self):
244 return os.lstat(self.path).st_dev
244 return os.lstat(self.path).st_dev
245
245
    def local(self):
        # this is a directly-accessible local repository, not a remote proxy
        return True
248
248
    def join(self, f):
        # path of f inside the .hg metadata directory
        return os.path.join(self.path, f)
251
251
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
254
254
255 def file(self, f):
255 def file(self, f):
256 if f[0] == '/':
256 if f[0] == '/':
257 f = f[1:]
257 f = f[1:]
258 return filelog.filelog(self.opener, f, self.revlogversion)
258 return filelog.filelog(self.opener, f, self.revlogversion)
259
259
    def getcwd(self):
        # delegate to the dirstate; presumably the cwd expressed relative
        # to the repository root — confirm against dirstate.getcwd
        return self.dirstate.getcwd()
262
262
    def wfile(self, f, mode='r'):
        # open f from the working directory directly, without the
        # encode/decode filtering applied by wread/wwrite
        return self.wopener(f, mode)
265
265
266 def wread(self, filename):
266 def wread(self, filename):
267 if self.encodepats == None:
267 if self.encodepats == None:
268 l = []
268 l = []
269 for pat, cmd in self.ui.configitems("encode"):
269 for pat, cmd in self.ui.configitems("encode"):
270 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 l.append((mf, cmd))
271 l.append((mf, cmd))
272 self.encodepats = l
272 self.encodepats = l
273
273
274 data = self.wopener(filename, 'r').read()
274 data = self.wopener(filename, 'r').read()
275
275
276 for mf, cmd in self.encodepats:
276 for mf, cmd in self.encodepats:
277 if mf(filename):
277 if mf(filename):
278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 data = util.filter(data, cmd)
279 data = util.filter(data, cmd)
280 break
280 break
281
281
282 return data
282 return data
283
283
284 def wwrite(self, filename, data, fd=None):
284 def wwrite(self, filename, data, fd=None):
285 if self.decodepats == None:
285 if self.decodepats == None:
286 l = []
286 l = []
287 for pat, cmd in self.ui.configitems("decode"):
287 for pat, cmd in self.ui.configitems("decode"):
288 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 mf = util.matcher(self.root, "", [pat], [], [])[1]
289 l.append((mf, cmd))
289 l.append((mf, cmd))
290 self.decodepats = l
290 self.decodepats = l
291
291
292 for mf, cmd in self.decodepats:
292 for mf, cmd in self.decodepats:
293 if mf(filename):
293 if mf(filename):
294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
295 data = util.filter(data, cmd)
295 data = util.filter(data, cmd)
296 break
296 break
297
297
298 if fd:
298 if fd:
299 return fd.write(data)
299 return fd.write(data)
300 return self.wopener(filename, 'w').write(data)
300 return self.wopener(filename, 'w').write(data)
301
301
    def transaction(self):
        """Return a transaction handle, nesting into one already running.

        Saves the current dirstate to journal.dirstate so rollback can
        restore it, then opens a journal-backed transaction.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already in progress: hand out a nested
            # handle instead of starting a new journal
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
319
319
320 def recover(self):
320 def recover(self):
321 l = self.lock()
321 l = self.lock()
322 if os.path.exists(self.join("journal")):
322 if os.path.exists(self.join("journal")):
323 self.ui.status(_("rolling back interrupted transaction\n"))
323 self.ui.status(_("rolling back interrupted transaction\n"))
324 transaction.rollback(self.opener, self.join("journal"))
324 transaction.rollback(self.opener, self.join("journal"))
325 self.reload()
325 self.reload()
326 return True
326 return True
327 else:
327 else:
328 self.ui.warn(_("no interrupted transaction available\n"))
328 self.ui.warn(_("no interrupted transaction available\n"))
329 return False
329 return False
330
330
331 def rollback(self, wlock=None):
331 def rollback(self, wlock=None):
332 if not wlock:
332 if not wlock:
333 wlock = self.wlock()
333 wlock = self.wlock()
334 l = self.lock()
334 l = self.lock()
335 if os.path.exists(self.join("undo")):
335 if os.path.exists(self.join("undo")):
336 self.ui.status(_("rolling back last transaction\n"))
336 self.ui.status(_("rolling back last transaction\n"))
337 transaction.rollback(self.opener, self.join("undo"))
337 transaction.rollback(self.opener, self.join("undo"))
338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
339 self.reload()
339 self.reload()
340 self.wreload()
340 self.wreload()
341 else:
341 else:
342 self.ui.warn(_("no rollback information available\n"))
342 self.ui.warn(_("no rollback information available\n"))
343
343
    def wreload(self):
        # re-read the dirstate from disk (working-directory state)
        self.dirstate.read()
346
346
    def reload(self):
        # re-read store data and invalidate the lazily-built tag caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
352
352
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file and return the lock object.

        Tries a non-blocking acquire first; if the lock is held and wait
        is true, retries with a timeout taken from ui.timeout (default
        600 seconds).  acquirefn, if given, runs after acquisition.
        Raises lock.LockHeld when not waiting and the lock is held.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
369
369
    def lock(self, wait=1):
        # repository (store) lock; caches are reloaded once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
373
373
    def wlock(self, wait=1):
        # working-directory lock; the dirstate is written back on release
        # and re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
378
378
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # Returns (existing_node, None, None) when the current entry can
        # be reused, or (None, fp1, fp2) with the parents a new filenode
        # should be committed against.
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep only the newer side
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
397
397
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Record a changeset directly from the given file list, without
        consulting working-directory status.

        p1/p2 default to the dirstate parents; the dirstate is updated
        only when committing on top of the checked-out first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when the commit parent is the
        # currently checked-out parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged relative to a parent: reuse the entry
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
453
453
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit the given files (or everything changed) as a new
        changeset and return its node, or None if nothing is committed.

        With no explicit files, commit everything reported changed by
        self.changes().  An editor is launched when text is empty or
        force_editor is set.  Runs the precommit, pretxncommit and
        commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly-listed files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 != nullid) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and the revision it was copied from
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged relative to a parent: reuse the entry
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
580
580
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
582 if node:
582 if node:
583 fdict = dict.fromkeys(files)
583 fdict = dict.fromkeys(files)
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
585 fdict.pop(fn, None)
585 fdict.pop(fn, None)
586 if match(fn):
586 if match(fn):
587 yield 'm', fn
587 yield 'm', fn
588 for fn in fdict:
588 for fn in fdict:
589 if badmatch and badmatch(fn):
589 if badmatch and badmatch(fn):
590 if match(fn):
590 if match(fn):
591 yield 'b', fn
591 yield 'b', fn
592 else:
592 else:
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
594 util.pathto(self.getcwd(), fn), short(node)))
594 util.pathto(self.getcwd(), fn), short(node)))
595 else:
595 else:
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
597 yield src, fn
597 yield src, fn
598
598
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
600 wlock=None, show_ignored=None):
600 wlock=None, show_ignored=None):
601 """return changes between two nodes or node and working directory
601 """return changes between two nodes or node and working directory
602
602
603 If node1 is None, use the first dirstate parent instead.
603 If node1 is None, use the first dirstate parent instead.
604 If node2 is None, compare node1 with working directory.
604 If node2 is None, compare node1 with working directory.
605 """
605 """
606
606
607 def fcmp(fn, mf):
607 def fcmp(fn, mf):
608 t1 = self.wread(fn)
608 t1 = self.wread(fn)
609 t2 = self.file(fn).read(mf.get(fn, nullid))
609 t2 = self.file(fn).read(mf.get(fn, nullid))
610 return cmp(t1, t2)
610 return cmp(t1, t2)
611
611
612 def mfmatches(node):
612 def mfmatches(node):
613 change = self.changelog.read(node)
613 change = self.changelog.read(node)
614 mf = dict(self.manifest.read(change[0]))
614 mf = dict(self.manifest.read(change[0]))
615 for fn in mf.keys():
615 for fn in mf.keys():
616 if not match(fn):
616 if not match(fn):
617 del mf[fn]
617 del mf[fn]
618 return mf
618 return mf
619
619
620 modified, added, removed, deleted, unknown, ignored = [],[],[],[],[],[]
620 modified, added, removed, deleted, unknown, ignored = [],[],[],[],[],[]
621 compareworking = False
621 compareworking = False
622 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
622 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
623 compareworking = True
623 compareworking = True
624
624
625 if not compareworking:
625 if not compareworking:
626 # read the manifest from node1 before the manifest from node2,
626 # read the manifest from node1 before the manifest from node2,
627 # so that we'll hit the manifest cache if we're going through
627 # so that we'll hit the manifest cache if we're going through
628 # all the revisions in parent->child order.
628 # all the revisions in parent->child order.
629 mf1 = mfmatches(node1)
629 mf1 = mfmatches(node1)
630
630
631 # are we comparing the working directory?
631 # are we comparing the working directory?
632 if not node2:
632 if not node2:
633 if not wlock:
633 if not wlock:
634 try:
634 try:
635 wlock = self.wlock(wait=0)
635 wlock = self.wlock(wait=0)
636 except lock.LockException:
636 except lock.LockException:
637 wlock = None
637 wlock = None
638 lookup, modified, added, removed, deleted, unknown, ignored = (
638 lookup, modified, added, removed, deleted, unknown, ignored = (
639 self.dirstate.changes(files, match, show_ignored))
639 self.dirstate.changes(files, match, show_ignored))
640
640
641 # are we comparing working dir against its parent?
641 # are we comparing working dir against its parent?
642 if compareworking:
642 if compareworking:
643 if lookup:
643 if lookup:
644 # do a full compare of any files that might have changed
644 # do a full compare of any files that might have changed
645 mf2 = mfmatches(self.dirstate.parents()[0])
645 mf2 = mfmatches(self.dirstate.parents()[0])
646 for f in lookup:
646 for f in lookup:
647 if fcmp(f, mf2):
647 if fcmp(f, mf2):
648 modified.append(f)
648 modified.append(f)
649 elif wlock is not None:
649 elif wlock is not None:
650 self.dirstate.update([f], "n")
650 self.dirstate.update([f], "n")
651 else:
651 else:
652 # we are comparing working dir against non-parent
652 # we are comparing working dir against non-parent
653 # generate a pseudo-manifest for the working dir
653 # generate a pseudo-manifest for the working dir
654 mf2 = mfmatches(self.dirstate.parents()[0])
654 mf2 = mfmatches(self.dirstate.parents()[0])
655 for f in lookup + modified + added:
655 for f in lookup + modified + added:
656 mf2[f] = ""
656 mf2[f] = ""
657 for f in removed:
657 for f in removed:
658 if f in mf2:
658 if f in mf2:
659 del mf2[f]
659 del mf2[f]
660 else:
660 else:
661 # we are comparing two revisions
661 # we are comparing two revisions
662 deleted, unknown, ignored = [], [], []
662 deleted, unknown, ignored = [], [], []
663 mf2 = mfmatches(node2)
663 mf2 = mfmatches(node2)
664
664
665 if not compareworking:
665 if not compareworking:
666 # flush lists from dirstate before comparing manifests
666 # flush lists from dirstate before comparing manifests
667 modified, added = [], []
667 modified, added = [], []
668
668
669 # make sure to sort the files so we talk to the disk in a
669 # make sure to sort the files so we talk to the disk in a
670 # reasonable order
670 # reasonable order
671 mf2keys = mf2.keys()
671 mf2keys = mf2.keys()
672 mf2keys.sort()
672 mf2keys.sort()
673 for fn in mf2keys:
673 for fn in mf2keys:
674 if mf1.has_key(fn):
674 if mf1.has_key(fn):
675 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
675 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
676 modified.append(fn)
676 modified.append(fn)
677 del mf1[fn]
677 del mf1[fn]
678 else:
678 else:
679 added.append(fn)
679 added.append(fn)
680
680
681 removed = mf1.keys()
681 removed = mf1.keys()
682
682
683 # sort and return results:
683 # sort and return results:
684 for l in modified, added, removed, deleted, unknown, ignored:
684 for l in modified, added, removed, deleted, unknown, ignored:
685 l.sort()
685 l.sort()
686 if show_ignored is None:
686 if show_ignored is None:
687 return (modified, added, removed, deleted, unknown)
687 return (modified, added, removed, deleted, unknown)
688 else:
688 else:
689 return (modified, added, removed, deleted, unknown, ignored)
689 return (modified, added, removed, deleted, unknown, ignored)
690
690
691 def add(self, list, wlock=None):
691 def add(self, list, wlock=None):
692 if not wlock:
692 if not wlock:
693 wlock = self.wlock()
693 wlock = self.wlock()
694 for f in list:
694 for f in list:
695 p = self.wjoin(f)
695 p = self.wjoin(f)
696 if not os.path.exists(p):
696 if not os.path.exists(p):
697 self.ui.warn(_("%s does not exist!\n") % f)
697 self.ui.warn(_("%s does not exist!\n") % f)
698 elif not os.path.isfile(p):
698 elif not os.path.isfile(p):
699 self.ui.warn(_("%s not added: only files supported currently\n")
699 self.ui.warn(_("%s not added: only files supported currently\n")
700 % f)
700 % f)
701 elif self.dirstate.state(f) in 'an':
701 elif self.dirstate.state(f) in 'an':
702 self.ui.warn(_("%s already tracked!\n") % f)
702 self.ui.warn(_("%s already tracked!\n") % f)
703 else:
703 else:
704 self.dirstate.update([f], "a")
704 self.dirstate.update([f], "a")
705
705
706 def forget(self, list, wlock=None):
706 def forget(self, list, wlock=None):
707 if not wlock:
707 if not wlock:
708 wlock = self.wlock()
708 wlock = self.wlock()
709 for f in list:
709 for f in list:
710 if self.dirstate.state(f) not in 'ai':
710 if self.dirstate.state(f) not in 'ai':
711 self.ui.warn(_("%s not added!\n") % f)
711 self.ui.warn(_("%s not added!\n") % f)
712 else:
712 else:
713 self.dirstate.forget([f])
713 self.dirstate.forget([f])
714
714
715 def remove(self, list, unlink=False, wlock=None):
715 def remove(self, list, unlink=False, wlock=None):
716 if unlink:
716 if unlink:
717 for f in list:
717 for f in list:
718 try:
718 try:
719 util.unlink(self.wjoin(f))
719 util.unlink(self.wjoin(f))
720 except OSError, inst:
720 except OSError, inst:
721 if inst.errno != errno.ENOENT:
721 if inst.errno != errno.ENOENT:
722 raise
722 raise
723 if not wlock:
723 if not wlock:
724 wlock = self.wlock()
724 wlock = self.wlock()
725 for f in list:
725 for f in list:
726 p = self.wjoin(f)
726 p = self.wjoin(f)
727 if os.path.exists(p):
727 if os.path.exists(p):
728 self.ui.warn(_("%s still exists!\n") % f)
728 self.ui.warn(_("%s still exists!\n") % f)
729 elif self.dirstate.state(f) == 'a':
729 elif self.dirstate.state(f) == 'a':
730 self.dirstate.forget([f])
730 self.dirstate.forget([f])
731 elif f not in self.dirstate:
731 elif f not in self.dirstate:
732 self.ui.warn(_("%s not tracked!\n") % f)
732 self.ui.warn(_("%s not tracked!\n") % f)
733 else:
733 else:
734 self.dirstate.update([f], "r")
734 self.dirstate.update([f], "r")
735
735
736 def undelete(self, list, wlock=None):
736 def undelete(self, list, wlock=None):
737 p = self.dirstate.parents()[0]
737 p = self.dirstate.parents()[0]
738 mn = self.changelog.read(p)[0]
738 mn = self.changelog.read(p)[0]
739 mf = self.manifest.readflags(mn)
739 mf = self.manifest.readflags(mn)
740 m = self.manifest.read(mn)
740 m = self.manifest.read(mn)
741 if not wlock:
741 if not wlock:
742 wlock = self.wlock()
742 wlock = self.wlock()
743 for f in list:
743 for f in list:
744 if self.dirstate.state(f) not in "r":
744 if self.dirstate.state(f) not in "r":
745 self.ui.warn("%s not removed!\n" % f)
745 self.ui.warn("%s not removed!\n" % f)
746 else:
746 else:
747 t = self.file(f).read(m[f])
747 t = self.file(f).read(m[f])
748 self.wwrite(f, t)
748 self.wwrite(f, t)
749 util.set_exec(self.wjoin(f), mf[f])
749 util.set_exec(self.wjoin(f), mf[f])
750 self.dirstate.update([f], "n")
750 self.dirstate.update([f], "n")
751
751
752 def copy(self, source, dest, wlock=None):
752 def copy(self, source, dest, wlock=None):
753 p = self.wjoin(dest)
753 p = self.wjoin(dest)
754 if not os.path.exists(p):
754 if not os.path.exists(p):
755 self.ui.warn(_("%s does not exist!\n") % dest)
755 self.ui.warn(_("%s does not exist!\n") % dest)
756 elif not os.path.isfile(p):
756 elif not os.path.isfile(p):
757 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
757 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
758 else:
758 else:
759 if not wlock:
759 if not wlock:
760 wlock = self.wlock()
760 wlock = self.wlock()
761 if self.dirstate.state(dest) == '?':
761 if self.dirstate.state(dest) == '?':
762 self.dirstate.update([dest], "a")
762 self.dirstate.update([dest], "a")
763 self.dirstate.copy(source, dest)
763 self.dirstate.copy(source, dest)
764
764
765 def heads(self, start=None):
765 def heads(self, start=None):
766 heads = self.changelog.heads(start)
766 heads = self.changelog.heads(start)
767 # sort the output in rev descending order
767 # sort the output in rev descending order
768 heads = [(-self.changelog.rev(h), h) for h in heads]
768 heads = [(-self.changelog.rev(h), h) for h in heads]
769 heads.sort()
769 heads.sort()
770 return [n for (r, n) in heads]
770 return [n for (r, n) in heads]
771
771
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
795 def branchlookup(self, heads=None, branch=None):
795 def branchlookup(self, heads=None, branch=None):
796 if not heads:
796 if not heads:
797 heads = self.heads()
797 heads = self.heads()
798 headt = [ h for h in heads ]
798 headt = [ h for h in heads ]
799 chlog = self.changelog
799 chlog = self.changelog
800 branches = {}
800 branches = {}
801 merges = []
801 merges = []
802 seenmerge = {}
802 seenmerge = {}
803
803
804 # traverse the tree once for each head, recording in the branches
804 # traverse the tree once for each head, recording in the branches
805 # dict which tags are visible from this head. The branches
805 # dict which tags are visible from this head. The branches
806 # dict also records which tags are visible from each tag
806 # dict also records which tags are visible from each tag
807 # while we traverse.
807 # while we traverse.
808 while headt or merges:
808 while headt or merges:
809 if merges:
809 if merges:
810 n, found = merges.pop()
810 n, found = merges.pop()
811 visit = [n]
811 visit = [n]
812 else:
812 else:
813 h = headt.pop()
813 h = headt.pop()
814 visit = [h]
814 visit = [h]
815 found = [h]
815 found = [h]
816 seen = {}
816 seen = {}
817 while visit:
817 while visit:
818 n = visit.pop()
818 n = visit.pop()
819 if n in seen:
819 if n in seen:
820 continue
820 continue
821 pp = chlog.parents(n)
821 pp = chlog.parents(n)
822 tags = self.nodetags(n)
822 tags = self.nodetags(n)
823 if tags:
823 if tags:
824 for x in tags:
824 for x in tags:
825 if x == 'tip':
825 if x == 'tip':
826 continue
826 continue
827 for f in found:
827 for f in found:
828 branches.setdefault(f, {})[n] = 1
828 branches.setdefault(f, {})[n] = 1
829 branches.setdefault(n, {})[n] = 1
829 branches.setdefault(n, {})[n] = 1
830 break
830 break
831 if n not in found:
831 if n not in found:
832 found.append(n)
832 found.append(n)
833 if branch in tags:
833 if branch in tags:
834 continue
834 continue
835 seen[n] = 1
835 seen[n] = 1
836 if pp[1] != nullid and n not in seenmerge:
836 if pp[1] != nullid and n not in seenmerge:
837 merges.append((pp[1], [x for x in found]))
837 merges.append((pp[1], [x for x in found]))
838 seenmerge[n] = 1
838 seenmerge[n] = 1
839 if pp[0] != nullid:
839 if pp[0] != nullid:
840 visit.append(pp[0])
840 visit.append(pp[0])
841 # traverse the branches dict, eliminating branch tags from each
841 # traverse the branches dict, eliminating branch tags from each
842 # head that are visible from another branch tag for that head.
842 # head that are visible from another branch tag for that head.
843 out = {}
843 out = {}
844 viscache = {}
844 viscache = {}
845 for h in heads:
845 for h in heads:
846 def visible(node):
846 def visible(node):
847 if node in viscache:
847 if node in viscache:
848 return viscache[node]
848 return viscache[node]
849 ret = {}
849 ret = {}
850 visit = [node]
850 visit = [node]
851 while visit:
851 while visit:
852 x = visit.pop()
852 x = visit.pop()
853 if x in viscache:
853 if x in viscache:
854 ret.update(viscache[x])
854 ret.update(viscache[x])
855 elif x not in ret:
855 elif x not in ret:
856 ret[x] = 1
856 ret[x] = 1
857 if x in branches:
857 if x in branches:
858 visit[len(visit):] = branches[x].keys()
858 visit[len(visit):] = branches[x].keys()
859 viscache[node] = ret
859 viscache[node] = ret
860 return ret
860 return ret
861 if h not in branches:
861 if h not in branches:
862 continue
862 continue
863 # O(n^2), but somewhat limited. This only searches the
863 # O(n^2), but somewhat limited. This only searches the
864 # tags visible from a specific head, not all the tags in the
864 # tags visible from a specific head, not all the tags in the
865 # whole repo.
865 # whole repo.
866 for b in branches[h]:
866 for b in branches[h]:
867 vis = False
867 vis = False
868 for bb in branches[h].keys():
868 for bb in branches[h].keys():
869 if b != bb:
869 if b != bb:
870 if b in visible(bb):
870 if b in visible(bb):
871 vis = True
871 vis = True
872 break
872 break
873 if not vis:
873 if not vis:
874 l = out.setdefault(h, [])
874 l = out.setdefault(h, [])
875 l[len(l):] = self.nodetags(b)
875 l[len(l):] = self.nodetags(b)
876 return out
876 return out
877
877
878 def branches(self, nodes):
878 def branches(self, nodes):
879 if not nodes:
879 if not nodes:
880 nodes = [self.changelog.tip()]
880 nodes = [self.changelog.tip()]
881 b = []
881 b = []
882 for n in nodes:
882 for n in nodes:
883 t = n
883 t = n
884 while 1:
884 while 1:
885 p = self.changelog.parents(n)
885 p = self.changelog.parents(n)
886 if p[1] != nullid or p[0] == nullid:
886 if p[1] != nullid or p[0] == nullid:
887 b.append((t, n, p[0], p[1]))
887 b.append((t, n, p[0], p[1]))
888 break
888 break
889 n = p[0]
889 n = p[0]
890 return b
890 return b
891
891
892 def between(self, pairs):
892 def between(self, pairs):
893 r = []
893 r = []
894
894
895 for top, bottom in pairs:
895 for top, bottom in pairs:
896 n, l, i = top, [], 0
896 n, l, i = top, [], 0
897 f = 1
897 f = 1
898
898
899 while n != bottom:
899 while n != bottom:
900 p = self.changelog.parents(n)[0]
900 p = self.changelog.parents(n)[0]
901 if i == f:
901 if i == f:
902 l.append(n)
902 l.append(n)
903 f = f * 2
903 f = f * 2
904 n = p
904 n = p
905 i += 1
905 i += 1
906
906
907 r.append(l)
907 r.append(l)
908
908
909 return r
909 return r
910
910
911 def findincoming(self, remote, base=None, heads=None, force=False):
911 def findincoming(self, remote, base=None, heads=None, force=False):
912 """Return list of roots of the subsets of missing nodes from remote
912 """Return list of roots of the subsets of missing nodes from remote
913
913
914 If base dict is specified, assume that these nodes and their parents
914 If base dict is specified, assume that these nodes and their parents
915 exist on the remote side and that no child of a node of base exists
915 exist on the remote side and that no child of a node of base exists
916 in both remote and self.
916 in both remote and self.
917 Furthermore base will be updated to include the nodes that exists
917 Furthermore base will be updated to include the nodes that exists
918 in self and remote but no children exists in self and remote.
918 in self and remote but no children exists in self and remote.
919 If a list of heads is specified, return only nodes which are heads
919 If a list of heads is specified, return only nodes which are heads
920 or ancestors of these heads.
920 or ancestors of these heads.
921
921
922 All the ancestors of base are in self and in remote.
922 All the ancestors of base are in self and in remote.
923 All the descendants of the list returned are missing in self.
923 All the descendants of the list returned are missing in self.
924 (and so we know that the rest of the nodes are missing in remote, see
924 (and so we know that the rest of the nodes are missing in remote, see
925 outgoing)
925 outgoing)
926 """
926 """
927 m = self.changelog.nodemap
927 m = self.changelog.nodemap
928 search = []
928 search = []
929 fetch = {}
929 fetch = {}
930 seen = {}
930 seen = {}
931 seenbranch = {}
931 seenbranch = {}
932 if base == None:
932 if base == None:
933 base = {}
933 base = {}
934
934
935 if not heads:
935 if not heads:
936 heads = remote.heads()
936 heads = remote.heads()
937
937
938 if self.changelog.tip() == nullid:
938 if self.changelog.tip() == nullid:
939 base[nullid] = 1
939 base[nullid] = 1
940 if heads != [nullid]:
940 if heads != [nullid]:
941 return [nullid]
941 return [nullid]
942 return []
942 return []
943
943
944 # assume we're closer to the tip than the root
944 # assume we're closer to the tip than the root
945 # and start by examining the heads
945 # and start by examining the heads
946 self.ui.status(_("searching for changes\n"))
946 self.ui.status(_("searching for changes\n"))
947
947
948 unknown = []
948 unknown = []
949 for h in heads:
949 for h in heads:
950 if h not in m:
950 if h not in m:
951 unknown.append(h)
951 unknown.append(h)
952 else:
952 else:
953 base[h] = 1
953 base[h] = 1
954
954
955 if not unknown:
955 if not unknown:
956 return []
956 return []
957
957
958 req = dict.fromkeys(unknown)
958 req = dict.fromkeys(unknown)
959 reqcnt = 0
959 reqcnt = 0
960
960
961 # search through remote branches
961 # search through remote branches
962 # a 'branch' here is a linear segment of history, with four parts:
962 # a 'branch' here is a linear segment of history, with four parts:
963 # head, root, first parent, second parent
963 # head, root, first parent, second parent
964 # (a branch always has two parents (or none) by definition)
964 # (a branch always has two parents (or none) by definition)
965 unknown = remote.branches(unknown)
965 unknown = remote.branches(unknown)
966 while unknown:
966 while unknown:
967 r = []
967 r = []
968 while unknown:
968 while unknown:
969 n = unknown.pop(0)
969 n = unknown.pop(0)
970 if n[0] in seen:
970 if n[0] in seen:
971 continue
971 continue
972
972
973 self.ui.debug(_("examining %s:%s\n")
973 self.ui.debug(_("examining %s:%s\n")
974 % (short(n[0]), short(n[1])))
974 % (short(n[0]), short(n[1])))
975 if n[0] == nullid: # found the end of the branch
975 if n[0] == nullid: # found the end of the branch
976 pass
976 pass
977 elif n in seenbranch:
977 elif n in seenbranch:
978 self.ui.debug(_("branch already found\n"))
978 self.ui.debug(_("branch already found\n"))
979 continue
979 continue
980 elif n[1] and n[1] in m: # do we know the base?
980 elif n[1] and n[1] in m: # do we know the base?
981 self.ui.debug(_("found incomplete branch %s:%s\n")
981 self.ui.debug(_("found incomplete branch %s:%s\n")
982 % (short(n[0]), short(n[1])))
982 % (short(n[0]), short(n[1])))
983 search.append(n) # schedule branch range for scanning
983 search.append(n) # schedule branch range for scanning
984 seenbranch[n] = 1
984 seenbranch[n] = 1
985 else:
985 else:
986 if n[1] not in seen and n[1] not in fetch:
986 if n[1] not in seen and n[1] not in fetch:
987 if n[2] in m and n[3] in m:
987 if n[2] in m and n[3] in m:
988 self.ui.debug(_("found new changeset %s\n") %
988 self.ui.debug(_("found new changeset %s\n") %
989 short(n[1]))
989 short(n[1]))
990 fetch[n[1]] = 1 # earliest unknown
990 fetch[n[1]] = 1 # earliest unknown
991 for p in n[2:4]:
991 for p in n[2:4]:
992 if p in m:
992 if p in m:
993 base[p] = 1 # latest known
993 base[p] = 1 # latest known
994
994
995 for p in n[2:4]:
995 for p in n[2:4]:
996 if p not in req and p not in m:
996 if p not in req and p not in m:
997 r.append(p)
997 r.append(p)
998 req[p] = 1
998 req[p] = 1
999 seen[n[0]] = 1
999 seen[n[0]] = 1
1000
1000
1001 if r:
1001 if r:
1002 reqcnt += 1
1002 reqcnt += 1
1003 self.ui.debug(_("request %d: %s\n") %
1003 self.ui.debug(_("request %d: %s\n") %
1004 (reqcnt, " ".join(map(short, r))))
1004 (reqcnt, " ".join(map(short, r))))
1005 for p in range(0, len(r), 10):
1005 for p in range(0, len(r), 10):
1006 for b in remote.branches(r[p:p+10]):
1006 for b in remote.branches(r[p:p+10]):
1007 self.ui.debug(_("received %s:%s\n") %
1007 self.ui.debug(_("received %s:%s\n") %
1008 (short(b[0]), short(b[1])))
1008 (short(b[0]), short(b[1])))
1009 unknown.append(b)
1009 unknown.append(b)
1010
1010
1011 # do binary search on the branches we found
1011 # do binary search on the branches we found
1012 while search:
1012 while search:
1013 n = search.pop(0)
1013 n = search.pop(0)
1014 reqcnt += 1
1014 reqcnt += 1
1015 l = remote.between([(n[0], n[1])])[0]
1015 l = remote.between([(n[0], n[1])])[0]
1016 l.append(n[1])
1016 l.append(n[1])
1017 p = n[0]
1017 p = n[0]
1018 f = 1
1018 f = 1
1019 for i in l:
1019 for i in l:
1020 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1020 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1021 if i in m:
1021 if i in m:
1022 if f <= 2:
1022 if f <= 2:
1023 self.ui.debug(_("found new branch changeset %s\n") %
1023 self.ui.debug(_("found new branch changeset %s\n") %
1024 short(p))
1024 short(p))
1025 fetch[p] = 1
1025 fetch[p] = 1
1026 base[i] = 1
1026 base[i] = 1
1027 else:
1027 else:
1028 self.ui.debug(_("narrowed branch search to %s:%s\n")
1028 self.ui.debug(_("narrowed branch search to %s:%s\n")
1029 % (short(p), short(i)))
1029 % (short(p), short(i)))
1030 search.append((p, i))
1030 search.append((p, i))
1031 break
1031 break
1032 p, f = i, f * 2
1032 p, f = i, f * 2
1033
1033
1034 # sanity check our fetch list
1034 # sanity check our fetch list
1035 for f in fetch.keys():
1035 for f in fetch.keys():
1036 if f in m:
1036 if f in m:
1037 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1037 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1038
1038
1039 if base.keys() == [nullid]:
1039 if base.keys() == [nullid]:
1040 if force:
1040 if force:
1041 self.ui.warn(_("warning: repository is unrelated\n"))
1041 self.ui.warn(_("warning: repository is unrelated\n"))
1042 else:
1042 else:
1043 raise util.Abort(_("repository is unrelated"))
1043 raise util.Abort(_("repository is unrelated"))
1044
1044
1045 self.ui.note(_("found new changesets starting at ") +
1045 self.ui.note(_("found new changesets starting at ") +
1046 " ".join([short(f) for f in fetch]) + "\n")
1046 " ".join([short(f) for f in fetch]) + "\n")
1047
1047
1048 self.ui.debug(_("%d total queries\n") % reqcnt)
1048 self.ui.debug(_("%d total queries\n") % reqcnt)
1049
1049
1050 return fetch.keys()
1050 return fetch.keys()
1051
1051
1052 def findoutgoing(self, remote, base=None, heads=None, force=False):
1052 def findoutgoing(self, remote, base=None, heads=None, force=False):
1053 """Return list of nodes that are roots of subsets not in remote
1053 """Return list of nodes that are roots of subsets not in remote
1054
1054
1055 If base dict is specified, assume that these nodes and their parents
1055 If base dict is specified, assume that these nodes and their parents
1056 exist on the remote side.
1056 exist on the remote side.
1057 If a list of heads is specified, return only nodes which are heads
1057 If a list of heads is specified, return only nodes which are heads
1058 or ancestors of these heads, and return a second element which
1058 or ancestors of these heads, and return a second element which
1059 contains all remote heads which get new children.
1059 contains all remote heads which get new children.
1060 """
1060 """
1061 if base == None:
1061 if base == None:
1062 base = {}
1062 base = {}
1063 self.findincoming(remote, base, heads, force=force)
1063 self.findincoming(remote, base, heads, force=force)
1064
1064
1065 self.ui.debug(_("common changesets up to ")
1065 self.ui.debug(_("common changesets up to ")
1066 + " ".join(map(short, base.keys())) + "\n")
1066 + " ".join(map(short, base.keys())) + "\n")
1067
1067
1068 remain = dict.fromkeys(self.changelog.nodemap)
1068 remain = dict.fromkeys(self.changelog.nodemap)
1069
1069
1070 # prune everything remote has from the tree
1070 # prune everything remote has from the tree
1071 del remain[nullid]
1071 del remain[nullid]
1072 remove = base.keys()
1072 remove = base.keys()
1073 while remove:
1073 while remove:
1074 n = remove.pop(0)
1074 n = remove.pop(0)
1075 if n in remain:
1075 if n in remain:
1076 del remain[n]
1076 del remain[n]
1077 for p in self.changelog.parents(n):
1077 for p in self.changelog.parents(n):
1078 remove.append(p)
1078 remove.append(p)
1079
1079
1080 # find every node whose parents have been pruned
1080 # find every node whose parents have been pruned
1081 subset = []
1081 subset = []
1082 # find every remote head that will get new children
1082 # find every remote head that will get new children
1083 updated_heads = {}
1083 updated_heads = {}
1084 for n in remain:
1084 for n in remain:
1085 p1, p2 = self.changelog.parents(n)
1085 p1, p2 = self.changelog.parents(n)
1086 if p1 not in remain and p2 not in remain:
1086 if p1 not in remain and p2 not in remain:
1087 subset.append(n)
1087 subset.append(n)
1088 if heads:
1088 if heads:
1089 if p1 in heads:
1089 if p1 in heads:
1090 updated_heads[p1] = True
1090 updated_heads[p1] = True
1091 if p2 in heads:
1091 if p2 in heads:
1092 updated_heads[p2] = True
1092 updated_heads[p2] = True
1093
1093
1094 # this is the set of all roots we have to push
1094 # this is the set of all roots we have to push
1095 if heads:
1095 if heads:
1096 return subset, updated_heads.keys()
1096 return subset, updated_heads.keys()
1097 else:
1097 else:
1098 return subset
1098 return subset
1099
1099
1100 def pull(self, remote, heads=None, force=False):
1100 def pull(self, remote, heads=None, force=False):
1101 l = self.lock()
1101 l = self.lock()
1102
1102
1103 fetch = self.findincoming(remote, force=force)
1103 fetch = self.findincoming(remote, force=force)
1104 if fetch == [nullid]:
1104 if fetch == [nullid]:
1105 self.ui.status(_("requesting all changes\n"))
1105 self.ui.status(_("requesting all changes\n"))
1106
1106
1107 if not fetch:
1107 if not fetch:
1108 self.ui.status(_("no changes found\n"))
1108 self.ui.status(_("no changes found\n"))
1109 return 0
1109 return 0
1110
1110
1111 if heads is None:
1111 if heads is None:
1112 cg = remote.changegroup(fetch, 'pull')
1112 cg = remote.changegroup(fetch, 'pull')
1113 else:
1113 else:
1114 cg = remote.changegroupsubset(fetch, heads, 'pull')
1114 cg = remote.changegroupsubset(fetch, heads, 'pull')
1115 return self.addchangegroup(cg, 'pull')
1115 return self.addchangegroup(cg, 'pull')
1116
1116
1117 def push(self, remote, force=False, revs=None):
1117 def push(self, remote, force=False, revs=None):
1118 # there are two ways to push to remote repo:
1118 # there are two ways to push to remote repo:
1119 #
1119 #
1120 # addchangegroup assumes local user can lock remote
1120 # addchangegroup assumes local user can lock remote
1121 # repo (local filesystem, old ssh servers).
1121 # repo (local filesystem, old ssh servers).
1122 #
1122 #
1123 # unbundle assumes local user cannot lock remote repo (new ssh
1123 # unbundle assumes local user cannot lock remote repo (new ssh
1124 # servers, http servers).
1124 # servers, http servers).
1125
1125
1126 if 'unbundle' in remote.capabilities:
1126 if 'unbundle' in remote.capabilities:
1127 return self.push_unbundle(remote, force, revs)
1127 return self.push_unbundle(remote, force, revs)
1128 return self.push_addchangegroup(remote, force, revs)
1128 return self.push_addchangegroup(remote, force, revs)
1129
1129
1130 def prepush(self, remote, force, revs):
1130 def prepush(self, remote, force, revs):
1131 base = {}
1131 base = {}
1132 remote_heads = remote.heads()
1132 remote_heads = remote.heads()
1133 inc = self.findincoming(remote, base, remote_heads, force=force)
1133 inc = self.findincoming(remote, base, remote_heads, force=force)
1134 if not force and inc:
1134 if not force and inc:
1135 self.ui.warn(_("abort: unsynced remote changes!\n"))
1135 self.ui.warn(_("abort: unsynced remote changes!\n"))
1136 self.ui.status(_("(did you forget to sync?"
1136 self.ui.status(_("(did you forget to sync?"
1137 " use push -f to force)\n"))
1137 " use push -f to force)\n"))
1138 return None, 1
1138 return None, 1
1139
1139
1140 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1140 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1141 if revs is not None:
1141 if revs is not None:
1142 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1142 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1143 else:
1143 else:
1144 bases, heads = update, self.changelog.heads()
1144 bases, heads = update, self.changelog.heads()
1145
1145
1146 if not bases:
1146 if not bases:
1147 self.ui.status(_("no changes found\n"))
1147 self.ui.status(_("no changes found\n"))
1148 return None, 1
1148 return None, 1
1149 elif not force:
1149 elif not force:
1150 # FIXME we don't properly detect creation of new heads
1150 # FIXME we don't properly detect creation of new heads
1151 # in the push -r case, assume the user knows what he's doing
1151 # in the push -r case, assume the user knows what he's doing
1152 if not revs and len(remote_heads) < len(heads) \
1152 if not revs and len(remote_heads) < len(heads) \
1153 and remote_heads != [nullid]:
1153 and remote_heads != [nullid]:
1154 self.ui.warn(_("abort: push creates new remote branches!\n"))
1154 self.ui.warn(_("abort: push creates new remote branches!\n"))
1155 self.ui.status(_("(did you forget to merge?"
1155 self.ui.status(_("(did you forget to merge?"
1156 " use push -f to force)\n"))
1156 " use push -f to force)\n"))
1157 return None, 1
1157 return None, 1
1158
1158
1159 if revs is None:
1159 if revs is None:
1160 cg = self.changegroup(update, 'push')
1160 cg = self.changegroup(update, 'push')
1161 else:
1161 else:
1162 cg = self.changegroupsubset(update, revs, 'push')
1162 cg = self.changegroupsubset(update, revs, 'push')
1163 return cg, remote_heads
1163 return cg, remote_heads
1164
1164
1165 def push_addchangegroup(self, remote, force, revs):
1165 def push_addchangegroup(self, remote, force, revs):
1166 lock = remote.lock()
1166 lock = remote.lock()
1167
1167
1168 ret = self.prepush(remote, force, revs)
1168 ret = self.prepush(remote, force, revs)
1169 if ret[0] is not None:
1169 if ret[0] is not None:
1170 cg, remote_heads = ret
1170 cg, remote_heads = ret
1171 return remote.addchangegroup(cg, 'push')
1171 return remote.addchangegroup(cg, 'push')
1172 return ret[1]
1172 return ret[1]
1173
1173
1174 def push_unbundle(self, remote, force, revs):
1174 def push_unbundle(self, remote, force, revs):
1175 # local repo finds heads on server, finds out what revs it
1175 # local repo finds heads on server, finds out what revs it
1176 # must push. once revs transferred, if server finds it has
1176 # must push. once revs transferred, if server finds it has
1177 # different heads (someone else won commit/push race), server
1177 # different heads (someone else won commit/push race), server
1178 # aborts.
1178 # aborts.
1179
1179
1180 ret = self.prepush(remote, force, revs)
1180 ret = self.prepush(remote, force, revs)
1181 if ret[0] is not None:
1181 if ret[0] is not None:
1182 cg, remote_heads = ret
1182 cg, remote_heads = ret
1183 if force: remote_heads = ['force']
1183 if force: remote_heads = ['force']
1184 return remote.unbundle(cg, remote_heads, 'push')
1184 return remote.unbundle(cg, remote_heads, 'push')
1185 return ret[1]
1185 return ret[1]
1186
1186
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases, heads - lists of binary changelog node ids delimiting the
        set of changesets to bundle.
        source - opaque tag passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer wrapping a lazy chunk generator; the
        heavy work happens as the buffer is read.
        """

        # Give hooks a chance to veto the operation before any work.
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            # Walk hasset in topological order, flooding ancestor nodes
            # into hasset, then drop every "had" node from msngset.
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                # c[0] is the manifest node, c[3] the changed-files list.
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # Mutable cell so the closure can remember the last rev seen.
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        # NOTE: the collector callbacks mutate msng_mnfst_set and
        # msng_filenode_set as earlier groups are streamed, so the
        # three phases below (changelog, manifests, files) must run in
        # exactly this order.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1457
1457
1458 def changegroup(self, basenodes, source):
1458 def changegroup(self, basenodes, source):
1459 """Generate a changegroup of all nodes that we have that a recipient
1459 """Generate a changegroup of all nodes that we have that a recipient
1460 doesn't.
1460 doesn't.
1461
1461
1462 This is much easier than the previous function as we can assume that
1462 This is much easier than the previous function as we can assume that
1463 the recipient has any changenode we aren't sending them."""
1463 the recipient has any changenode we aren't sending them."""
1464
1464
1465 self.hook('preoutgoing', throw=True, source=source)
1465 self.hook('preoutgoing', throw=True, source=source)
1466
1466
1467 cl = self.changelog
1467 cl = self.changelog
1468 nodes = cl.nodesbetween(basenodes, None)[0]
1468 nodes = cl.nodesbetween(basenodes, None)[0]
1469 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1469 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1470
1470
1471 def identity(x):
1471 def identity(x):
1472 return x
1472 return x
1473
1473
1474 def gennodelst(revlog):
1474 def gennodelst(revlog):
1475 for r in xrange(0, revlog.count()):
1475 for r in xrange(0, revlog.count()):
1476 n = revlog.node(r)
1476 n = revlog.node(r)
1477 if revlog.linkrev(n) in revset:
1477 if revlog.linkrev(n) in revset:
1478 yield n
1478 yield n
1479
1479
1480 def changed_file_collector(changedfileset):
1480 def changed_file_collector(changedfileset):
1481 def collect_changed_files(clnode):
1481 def collect_changed_files(clnode):
1482 c = cl.read(clnode)
1482 c = cl.read(clnode)
1483 for fname in c[3]:
1483 for fname in c[3]:
1484 changedfileset[fname] = 1
1484 changedfileset[fname] = 1
1485 return collect_changed_files
1485 return collect_changed_files
1486
1486
1487 def lookuprevlink_func(revlog):
1487 def lookuprevlink_func(revlog):
1488 def lookuprevlink(n):
1488 def lookuprevlink(n):
1489 return cl.node(revlog.linkrev(n))
1489 return cl.node(revlog.linkrev(n))
1490 return lookuprevlink
1490 return lookuprevlink
1491
1491
1492 def gengroup():
1492 def gengroup():
1493 # construct a list of all changed files
1493 # construct a list of all changed files
1494 changedfiles = {}
1494 changedfiles = {}
1495
1495
1496 for chnk in cl.group(nodes, identity,
1496 for chnk in cl.group(nodes, identity,
1497 changed_file_collector(changedfiles)):
1497 changed_file_collector(changedfiles)):
1498 yield chnk
1498 yield chnk
1499 changedfiles = changedfiles.keys()
1499 changedfiles = changedfiles.keys()
1500 changedfiles.sort()
1500 changedfiles.sort()
1501
1501
1502 mnfst = self.manifest
1502 mnfst = self.manifest
1503 nodeiter = gennodelst(mnfst)
1503 nodeiter = gennodelst(mnfst)
1504 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1504 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1505 yield chnk
1505 yield chnk
1506
1506
1507 for fname in changedfiles:
1507 for fname in changedfiles:
1508 filerevlog = self.file(fname)
1508 filerevlog = self.file(fname)
1509 nodeiter = gennodelst(filerevlog)
1509 nodeiter = gennodelst(filerevlog)
1510 nodeiter = list(nodeiter)
1510 nodeiter = list(nodeiter)
1511 if nodeiter:
1511 if nodeiter:
1512 yield changegroup.genchunk(fname)
1512 yield changegroup.genchunk(fname)
1513 lookup = lookuprevlink_func(filerevlog)
1513 lookup = lookuprevlink_func(filerevlog)
1514 for chnk in filerevlog.group(nodeiter, lookup):
1514 for chnk in filerevlog.group(nodeiter, lookup):
1515 yield chnk
1515 yield chnk
1516
1516
1517 yield changegroup.closechunk()
1517 yield changegroup.closechunk()
1518
1518
1519 if nodes:
1519 if nodes:
1520 self.hook('outgoing', node=hex(nodes[0]), source=source)
1520 self.hook('outgoing', node=hex(nodes[0]), source=source)
1521
1521
1522 return util.chunkbuffer(gengroup())
1522 return util.chunkbuffer(gengroup())
1523
1523
1524 def addchangegroup(self, source, srctype):
1524 def addchangegroup(self, source, srctype):
1525 """add changegroup to repo.
1525 """add changegroup to repo.
1526 returns number of heads modified or added + 1."""
1526 returns number of heads modified or added + 1."""
1527
1527
1528 def csmap(x):
1528 def csmap(x):
1529 self.ui.debug(_("add changeset %s\n") % short(x))
1529 self.ui.debug(_("add changeset %s\n") % short(x))
1530 return cl.count()
1530 return cl.count()
1531
1531
1532 def revmap(x):
1532 def revmap(x):
1533 return cl.rev(x)
1533 return cl.rev(x)
1534
1534
1535 if not source:
1535 if not source:
1536 return 0
1536 return 0
1537
1537
1538 self.hook('prechangegroup', throw=True, source=srctype)
1538 self.hook('prechangegroup', throw=True, source=srctype)
1539
1539
1540 changesets = files = revisions = 0
1540 changesets = files = revisions = 0
1541
1541
1542 tr = self.transaction()
1542 tr = self.transaction()
1543
1543
1544 # write changelog data to temp files so concurrent readers will not see
1544 # write changelog data to temp files so concurrent readers will not see
1545 # inconsistent view
1545 # inconsistent view
1546 cl = None
1546 cl = None
1547 try:
1547 try:
1548 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1548 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1549
1549
1550 oldheads = len(cl.heads())
1550 oldheads = len(cl.heads())
1551
1551
1552 # pull off the changeset group
1552 # pull off the changeset group
1553 self.ui.status(_("adding changesets\n"))
1553 self.ui.status(_("adding changesets\n"))
1554 cor = cl.count() - 1
1554 cor = cl.count() - 1
1555 chunkiter = changegroup.chunkiter(source)
1555 chunkiter = changegroup.chunkiter(source)
1556 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1556 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1557 raise util.Abort(_("received changelog group is empty"))
1557 raise util.Abort(_("received changelog group is empty"))
1558 cnr = cl.count() - 1
1558 cnr = cl.count() - 1
1559 changesets = cnr - cor
1559 changesets = cnr - cor
1560
1560
1561 # pull off the manifest group
1561 # pull off the manifest group
1562 self.ui.status(_("adding manifests\n"))
1562 self.ui.status(_("adding manifests\n"))
1563 chunkiter = changegroup.chunkiter(source)
1563 chunkiter = changegroup.chunkiter(source)
1564 # no need to check for empty manifest group here:
1564 # no need to check for empty manifest group here:
1565 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1565 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1566 # no new manifest will be created and the manifest group will
1566 # no new manifest will be created and the manifest group will
1567 # be empty during the pull
1567 # be empty during the pull
1568 self.manifest.addgroup(chunkiter, revmap, tr)
1568 self.manifest.addgroup(chunkiter, revmap, tr)
1569
1569
1570 # process the files
1570 # process the files
1571 self.ui.status(_("adding file changes\n"))
1571 self.ui.status(_("adding file changes\n"))
1572 while 1:
1572 while 1:
1573 f = changegroup.getchunk(source)
1573 f = changegroup.getchunk(source)
1574 if not f:
1574 if not f:
1575 break
1575 break
1576 self.ui.debug(_("adding %s revisions\n") % f)
1576 self.ui.debug(_("adding %s revisions\n") % f)
1577 fl = self.file(f)
1577 fl = self.file(f)
1578 o = fl.count()
1578 o = fl.count()
1579 chunkiter = changegroup.chunkiter(source)
1579 chunkiter = changegroup.chunkiter(source)
1580 if fl.addgroup(chunkiter, revmap, tr) is None:
1580 if fl.addgroup(chunkiter, revmap, tr) is None:
1581 raise util.Abort(_("received file revlog group is empty"))
1581 raise util.Abort(_("received file revlog group is empty"))
1582 revisions += fl.count() - o
1582 revisions += fl.count() - o
1583 files += 1
1583 files += 1
1584
1584
1585 cl.writedata()
1585 cl.writedata()
1586 finally:
1586 finally:
1587 if cl:
1587 if cl:
1588 cl.cleanup()
1588 cl.cleanup()
1589
1589
1590 # make changelog see real files again
1590 # make changelog see real files again
1591 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1591 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1592 self.changelog.checkinlinesize(tr)
1592 self.changelog.checkinlinesize(tr)
1593
1593
1594 newheads = len(self.changelog.heads())
1594 newheads = len(self.changelog.heads())
1595 heads = ""
1595 heads = ""
1596 if oldheads and newheads != oldheads:
1596 if oldheads and newheads != oldheads:
1597 heads = _(" (%+d heads)") % (newheads - oldheads)
1597 heads = _(" (%+d heads)") % (newheads - oldheads)
1598
1598
1599 self.ui.status(_("added %d changesets"
1599 self.ui.status(_("added %d changesets"
1600 " with %d changes to %d files%s\n")
1600 " with %d changes to %d files%s\n")
1601 % (changesets, revisions, files, heads))
1601 % (changesets, revisions, files, heads))
1602
1602
1603 if changesets > 0:
1603 if changesets > 0:
1604 self.hook('pretxnchangegroup', throw=True,
1604 self.hook('pretxnchangegroup', throw=True,
1605 node=hex(self.changelog.node(cor+1)), source=srctype)
1605 node=hex(self.changelog.node(cor+1)), source=srctype)
1606
1606
1607 tr.close()
1607 tr.close()
1608
1608
1609 if changesets > 0:
1609 if changesets > 0:
1610 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1610 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1611 source=srctype)
1611 source=srctype)
1612
1612
1613 for i in range(cor + 1, cnr + 1):
1613 for i in range(cor + 1, cnr + 1):
1614 self.hook("incoming", node=hex(self.changelog.node(i)),
1614 self.hook("incoming", node=hex(self.changelog.node(i)),
1615 source=srctype)
1615 source=srctype)
1616
1616
1617 return newheads - oldheads + 1
1617 return newheads - oldheads + 1
1618
1618
1619 def update(self, node, allow=False, force=False, choose=None,
1619 def update(self, node, allow=False, force=False, choose=None,
1620 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1620 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1621 pl = self.dirstate.parents()
1621 pl = self.dirstate.parents()
1622 if not force and pl[1] != nullid:
1622 if not force and pl[1] != nullid:
1623 raise util.Abort(_("outstanding uncommitted merges"))
1623 raise util.Abort(_("outstanding uncommitted merges"))
1624
1624
1625 err = False
1625 err = False
1626
1626
1627 p1, p2 = pl[0], node
1627 p1, p2 = pl[0], node
1628 pa = self.changelog.ancestor(p1, p2)
1628 pa = self.changelog.ancestor(p1, p2)
1629 m1n = self.changelog.read(p1)[0]
1629 m1n = self.changelog.read(p1)[0]
1630 m2n = self.changelog.read(p2)[0]
1630 m2n = self.changelog.read(p2)[0]
1631 man = self.manifest.ancestor(m1n, m2n)
1631 man = self.manifest.ancestor(m1n, m2n)
1632 m1 = self.manifest.read(m1n)
1632 m1 = self.manifest.read(m1n)
1633 mf1 = self.manifest.readflags(m1n)
1633 mf1 = self.manifest.readflags(m1n)
1634 m2 = self.manifest.read(m2n).copy()
1634 m2 = self.manifest.read(m2n).copy()
1635 mf2 = self.manifest.readflags(m2n)
1635 mf2 = self.manifest.readflags(m2n)
1636 ma = self.manifest.read(man)
1636 ma = self.manifest.read(man)
1637 mfa = self.manifest.readflags(man)
1637 mfa = self.manifest.readflags(man)
1638
1638
1639 modified, added, removed, deleted, unknown = self.changes()
1639 modified, added, removed, deleted, unknown = self.changes()
1640
1640
1641 # is this a jump, or a merge? i.e. is there a linear path
1641 # is this a jump, or a merge? i.e. is there a linear path
1642 # from p1 to p2?
1642 # from p1 to p2?
1643 linear_path = (pa == p1 or pa == p2)
1643 linear_path = (pa == p1 or pa == p2)
1644
1644
1645 if allow and linear_path:
1645 if allow and linear_path:
1646 raise util.Abort(_("there is nothing to merge, "
1646 raise util.Abort(_("there is nothing to merge, just use "
1647 "just use 'hg update'"))
1647 "'hg update' or look at 'hg heads'"))
1648 if allow and not forcemerge:
1648 if allow and not forcemerge:
1649 if modified or added or removed:
1649 if modified or added or removed:
1650 raise util.Abort(_("outstanding uncommitted changes"))
1650 raise util.Abort(_("outstanding uncommitted changes"))
1651
1651
1652 if not forcemerge and not force:
1652 if not forcemerge and not force:
1653 for f in unknown:
1653 for f in unknown:
1654 if f in m2:
1654 if f in m2:
1655 t1 = self.wread(f)
1655 t1 = self.wread(f)
1656 t2 = self.file(f).read(m2[f])
1656 t2 = self.file(f).read(m2[f])
1657 if cmp(t1, t2) != 0:
1657 if cmp(t1, t2) != 0:
1658 raise util.Abort(_("'%s' already exists in the working"
1658 raise util.Abort(_("'%s' already exists in the working"
1659 " dir and differs from remote") % f)
1659 " dir and differs from remote") % f)
1660
1660
1661 # resolve the manifest to determine which files
1661 # resolve the manifest to determine which files
1662 # we care about merging
1662 # we care about merging
1663 self.ui.note(_("resolving manifests\n"))
1663 self.ui.note(_("resolving manifests\n"))
1664 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1664 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1665 (force, allow, moddirstate, linear_path))
1665 (force, allow, moddirstate, linear_path))
1666 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1666 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1667 (short(man), short(m1n), short(m2n)))
1667 (short(man), short(m1n), short(m2n)))
1668
1668
1669 merge = {}
1669 merge = {}
1670 get = {}
1670 get = {}
1671 remove = []
1671 remove = []
1672
1672
1673 # construct a working dir manifest
1673 # construct a working dir manifest
1674 mw = m1.copy()
1674 mw = m1.copy()
1675 mfw = mf1.copy()
1675 mfw = mf1.copy()
1676 umap = dict.fromkeys(unknown)
1676 umap = dict.fromkeys(unknown)
1677
1677
1678 for f in added + modified + unknown:
1678 for f in added + modified + unknown:
1679 mw[f] = ""
1679 mw[f] = ""
1680 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1680 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1681
1681
1682 if moddirstate and not wlock:
1682 if moddirstate and not wlock:
1683 wlock = self.wlock()
1683 wlock = self.wlock()
1684
1684
1685 for f in deleted + removed:
1685 for f in deleted + removed:
1686 if f in mw:
1686 if f in mw:
1687 del mw[f]
1687 del mw[f]
1688
1688
1689 # If we're jumping between revisions (as opposed to merging),
1689 # If we're jumping between revisions (as opposed to merging),
1690 # and if neither the working directory nor the target rev has
1690 # and if neither the working directory nor the target rev has
1691 # the file, then we need to remove it from the dirstate, to
1691 # the file, then we need to remove it from the dirstate, to
1692 # prevent the dirstate from listing the file when it is no
1692 # prevent the dirstate from listing the file when it is no
1693 # longer in the manifest.
1693 # longer in the manifest.
1694 if moddirstate and linear_path and f not in m2:
1694 if moddirstate and linear_path and f not in m2:
1695 self.dirstate.forget((f,))
1695 self.dirstate.forget((f,))
1696
1696
1697 # Compare manifests
1697 # Compare manifests
1698 for f, n in mw.iteritems():
1698 for f, n in mw.iteritems():
1699 if choose and not choose(f):
1699 if choose and not choose(f):
1700 continue
1700 continue
1701 if f in m2:
1701 if f in m2:
1702 s = 0
1702 s = 0
1703
1703
1704 # is the wfile new since m1, and match m2?
1704 # is the wfile new since m1, and match m2?
1705 if f not in m1:
1705 if f not in m1:
1706 t1 = self.wread(f)
1706 t1 = self.wread(f)
1707 t2 = self.file(f).read(m2[f])
1707 t2 = self.file(f).read(m2[f])
1708 if cmp(t1, t2) == 0:
1708 if cmp(t1, t2) == 0:
1709 n = m2[f]
1709 n = m2[f]
1710 del t1, t2
1710 del t1, t2
1711
1711
1712 # are files different?
1712 # are files different?
1713 if n != m2[f]:
1713 if n != m2[f]:
1714 a = ma.get(f, nullid)
1714 a = ma.get(f, nullid)
1715 # are both different from the ancestor?
1715 # are both different from the ancestor?
1716 if n != a and m2[f] != a:
1716 if n != a and m2[f] != a:
1717 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1717 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1718 # merge executable bits
1718 # merge executable bits
1719 # "if we changed or they changed, change in merge"
1719 # "if we changed or they changed, change in merge"
1720 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1720 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1721 mode = ((a^b) | (a^c)) ^ a
1721 mode = ((a^b) | (a^c)) ^ a
1722 merge[f] = (m1.get(f, nullid), m2[f], mode)
1722 merge[f] = (m1.get(f, nullid), m2[f], mode)
1723 s = 1
1723 s = 1
1724 # are we clobbering?
1724 # are we clobbering?
1725 # is remote's version newer?
1725 # is remote's version newer?
1726 # or are we going back in time?
1726 # or are we going back in time?
1727 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1727 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1728 self.ui.debug(_(" remote %s is newer, get\n") % f)
1728 self.ui.debug(_(" remote %s is newer, get\n") % f)
1729 get[f] = m2[f]
1729 get[f] = m2[f]
1730 s = 1
1730 s = 1
1731 elif f in umap or f in added:
1731 elif f in umap or f in added:
1732 # this unknown file is the same as the checkout
1732 # this unknown file is the same as the checkout
1733 # we need to reset the dirstate if the file was added
1733 # we need to reset the dirstate if the file was added
1734 get[f] = m2[f]
1734 get[f] = m2[f]
1735
1735
1736 if not s and mfw[f] != mf2[f]:
1736 if not s and mfw[f] != mf2[f]:
1737 if force:
1737 if force:
1738 self.ui.debug(_(" updating permissions for %s\n") % f)
1738 self.ui.debug(_(" updating permissions for %s\n") % f)
1739 util.set_exec(self.wjoin(f), mf2[f])
1739 util.set_exec(self.wjoin(f), mf2[f])
1740 else:
1740 else:
1741 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1741 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1742 mode = ((a^b) | (a^c)) ^ a
1742 mode = ((a^b) | (a^c)) ^ a
1743 if mode != b:
1743 if mode != b:
1744 self.ui.debug(_(" updating permissions for %s\n")
1744 self.ui.debug(_(" updating permissions for %s\n")
1745 % f)
1745 % f)
1746 util.set_exec(self.wjoin(f), mode)
1746 util.set_exec(self.wjoin(f), mode)
1747 del m2[f]
1747 del m2[f]
1748 elif f in ma:
1748 elif f in ma:
1749 if n != ma[f]:
1749 if n != ma[f]:
1750 r = _("d")
1750 r = _("d")
1751 if not force and (linear_path or allow):
1751 if not force and (linear_path or allow):
1752 r = self.ui.prompt(
1752 r = self.ui.prompt(
1753 (_(" local changed %s which remote deleted\n") % f) +
1753 (_(" local changed %s which remote deleted\n") % f) +
1754 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1754 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1755 if r == _("d"):
1755 if r == _("d"):
1756 remove.append(f)
1756 remove.append(f)
1757 else:
1757 else:
1758 self.ui.debug(_("other deleted %s\n") % f)
1758 self.ui.debug(_("other deleted %s\n") % f)
1759 remove.append(f) # other deleted it
1759 remove.append(f) # other deleted it
1760 else:
1760 else:
1761 # file is created on branch or in working directory
1761 # file is created on branch or in working directory
1762 if force and f not in umap:
1762 if force and f not in umap:
1763 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1763 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1764 remove.append(f)
1764 remove.append(f)
1765 elif n == m1.get(f, nullid): # same as parent
1765 elif n == m1.get(f, nullid): # same as parent
1766 if p2 == pa: # going backwards?
1766 if p2 == pa: # going backwards?
1767 self.ui.debug(_("remote deleted %s\n") % f)
1767 self.ui.debug(_("remote deleted %s\n") % f)
1768 remove.append(f)
1768 remove.append(f)
1769 else:
1769 else:
1770 self.ui.debug(_("local modified %s, keeping\n") % f)
1770 self.ui.debug(_("local modified %s, keeping\n") % f)
1771 else:
1771 else:
1772 self.ui.debug(_("working dir created %s, keeping\n") % f)
1772 self.ui.debug(_("working dir created %s, keeping\n") % f)
1773
1773
1774 for f, n in m2.iteritems():
1774 for f, n in m2.iteritems():
1775 if choose and not choose(f):
1775 if choose and not choose(f):
1776 continue
1776 continue
1777 if f[0] == "/":
1777 if f[0] == "/":
1778 continue
1778 continue
1779 if f in ma and n != ma[f]:
1779 if f in ma and n != ma[f]:
1780 r = _("k")
1780 r = _("k")
1781 if not force and (linear_path or allow):
1781 if not force and (linear_path or allow):
1782 r = self.ui.prompt(
1782 r = self.ui.prompt(
1783 (_("remote changed %s which local deleted\n") % f) +
1783 (_("remote changed %s which local deleted\n") % f) +
1784 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1784 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1785 if r == _("k"):
1785 if r == _("k"):
1786 get[f] = n
1786 get[f] = n
1787 elif f not in ma:
1787 elif f not in ma:
1788 self.ui.debug(_("remote created %s\n") % f)
1788 self.ui.debug(_("remote created %s\n") % f)
1789 get[f] = n
1789 get[f] = n
1790 else:
1790 else:
1791 if force or p2 == pa: # going backwards?
1791 if force or p2 == pa: # going backwards?
1792 self.ui.debug(_("local deleted %s, recreating\n") % f)
1792 self.ui.debug(_("local deleted %s, recreating\n") % f)
1793 get[f] = n
1793 get[f] = n
1794 else:
1794 else:
1795 self.ui.debug(_("local deleted %s\n") % f)
1795 self.ui.debug(_("local deleted %s\n") % f)
1796
1796
1797 del mw, m1, m2, ma
1797 del mw, m1, m2, ma
1798
1798
1799 if force:
1799 if force:
1800 for f in merge:
1800 for f in merge:
1801 get[f] = merge[f][1]
1801 get[f] = merge[f][1]
1802 merge = {}
1802 merge = {}
1803
1803
1804 if linear_path or force:
1804 if linear_path or force:
1805 # we don't need to do any magic, just jump to the new rev
1805 # we don't need to do any magic, just jump to the new rev
1806 branch_merge = False
1806 branch_merge = False
1807 p1, p2 = p2, nullid
1807 p1, p2 = p2, nullid
1808 else:
1808 else:
1809 if not allow:
1809 if not allow:
1810 self.ui.status(_("this update spans a branch"
1810 self.ui.status(_("this update spans a branch"
1811 " affecting the following files:\n"))
1811 " affecting the following files:\n"))
1812 fl = merge.keys() + get.keys()
1812 fl = merge.keys() + get.keys()
1813 fl.sort()
1813 fl.sort()
1814 for f in fl:
1814 for f in fl:
1815 cf = ""
1815 cf = ""
1816 if f in merge:
1816 if f in merge:
1817 cf = _(" (resolve)")
1817 cf = _(" (resolve)")
1818 self.ui.status(" %s%s\n" % (f, cf))
1818 self.ui.status(" %s%s\n" % (f, cf))
1819 self.ui.warn(_("aborting update spanning branches!\n"))
1819 self.ui.warn(_("aborting update spanning branches!\n"))
1820 self.ui.status(_("(use 'hg merge' to merge across branches"
1820 self.ui.status(_("(use 'hg merge' to merge across branches"
1821 " or 'hg update -C' to lose changes)\n"))
1821 " or 'hg update -C' to lose changes)\n"))
1822 return 1
1822 return 1
1823 branch_merge = True
1823 branch_merge = True
1824
1824
1825 xp1 = hex(p1)
1825 xp1 = hex(p1)
1826 xp2 = hex(p2)
1826 xp2 = hex(p2)
1827 if p2 == nullid: xxp2 = ''
1827 if p2 == nullid: xxp2 = ''
1828 else: xxp2 = xp2
1828 else: xxp2 = xp2
1829
1829
1830 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1830 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1831
1831
1832 # get the files we don't need to change
1832 # get the files we don't need to change
1833 files = get.keys()
1833 files = get.keys()
1834 files.sort()
1834 files.sort()
1835 for f in files:
1835 for f in files:
1836 if f[0] == "/":
1836 if f[0] == "/":
1837 continue
1837 continue
1838 self.ui.note(_("getting %s\n") % f)
1838 self.ui.note(_("getting %s\n") % f)
1839 t = self.file(f).read(get[f])
1839 t = self.file(f).read(get[f])
1840 self.wwrite(f, t)
1840 self.wwrite(f, t)
1841 util.set_exec(self.wjoin(f), mf2[f])
1841 util.set_exec(self.wjoin(f), mf2[f])
1842 if moddirstate:
1842 if moddirstate:
1843 if branch_merge:
1843 if branch_merge:
1844 self.dirstate.update([f], 'n', st_mtime=-1)
1844 self.dirstate.update([f], 'n', st_mtime=-1)
1845 else:
1845 else:
1846 self.dirstate.update([f], 'n')
1846 self.dirstate.update([f], 'n')
1847
1847
1848 # merge the tricky bits
1848 # merge the tricky bits
1849 failedmerge = []
1849 failedmerge = []
1850 files = merge.keys()
1850 files = merge.keys()
1851 files.sort()
1851 files.sort()
1852 for f in files:
1852 for f in files:
1853 self.ui.status(_("merging %s\n") % f)
1853 self.ui.status(_("merging %s\n") % f)
1854 my, other, flag = merge[f]
1854 my, other, flag = merge[f]
1855 ret = self.merge3(f, my, other, xp1, xp2)
1855 ret = self.merge3(f, my, other, xp1, xp2)
1856 if ret:
1856 if ret:
1857 err = True
1857 err = True
1858 failedmerge.append(f)
1858 failedmerge.append(f)
1859 util.set_exec(self.wjoin(f), flag)
1859 util.set_exec(self.wjoin(f), flag)
1860 if moddirstate:
1860 if moddirstate:
1861 if branch_merge:
1861 if branch_merge:
1862 # We've done a branch merge, mark this file as merged
1862 # We've done a branch merge, mark this file as merged
1863 # so that we properly record the merger later
1863 # so that we properly record the merger later
1864 self.dirstate.update([f], 'm')
1864 self.dirstate.update([f], 'm')
1865 else:
1865 else:
1866 # We've update-merged a locally modified file, so
1866 # We've update-merged a locally modified file, so
1867 # we set the dirstate to emulate a normal checkout
1867 # we set the dirstate to emulate a normal checkout
1868 # of that file some time in the past. Thus our
1868 # of that file some time in the past. Thus our
1869 # merge will appear as a normal local file
1869 # merge will appear as a normal local file
1870 # modification.
1870 # modification.
1871 f_len = len(self.file(f).read(other))
1871 f_len = len(self.file(f).read(other))
1872 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1872 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1873
1873
1874 remove.sort()
1874 remove.sort()
1875 for f in remove:
1875 for f in remove:
1876 self.ui.note(_("removing %s\n") % f)
1876 self.ui.note(_("removing %s\n") % f)
1877 util.audit_path(f)
1877 util.audit_path(f)
1878 try:
1878 try:
1879 util.unlink(self.wjoin(f))
1879 util.unlink(self.wjoin(f))
1880 except OSError, inst:
1880 except OSError, inst:
1881 if inst.errno != errno.ENOENT:
1881 if inst.errno != errno.ENOENT:
1882 self.ui.warn(_("update failed to remove %s: %s!\n") %
1882 self.ui.warn(_("update failed to remove %s: %s!\n") %
1883 (f, inst.strerror))
1883 (f, inst.strerror))
1884 if moddirstate:
1884 if moddirstate:
1885 if branch_merge:
1885 if branch_merge:
1886 self.dirstate.update(remove, 'r')
1886 self.dirstate.update(remove, 'r')
1887 else:
1887 else:
1888 self.dirstate.forget(remove)
1888 self.dirstate.forget(remove)
1889
1889
1890 if moddirstate:
1890 if moddirstate:
1891 self.dirstate.setparents(p1, p2)
1891 self.dirstate.setparents(p1, p2)
1892
1892
1893 if show_stats:
1893 if show_stats:
1894 stats = ((len(get), _("updated")),
1894 stats = ((len(get), _("updated")),
1895 (len(merge) - len(failedmerge), _("merged")),
1895 (len(merge) - len(failedmerge), _("merged")),
1896 (len(remove), _("removed")),
1896 (len(remove), _("removed")),
1897 (len(failedmerge), _("unresolved")))
1897 (len(failedmerge), _("unresolved")))
1898 note = ", ".join([_("%d files %s") % s for s in stats])
1898 note = ", ".join([_("%d files %s") % s for s in stats])
1899 self.ui.status("%s\n" % note)
1899 self.ui.status("%s\n" % note)
1900 if moddirstate:
1900 if moddirstate:
1901 if branch_merge:
1901 if branch_merge:
1902 if failedmerge:
1902 if failedmerge:
1903 self.ui.status(_("There are unresolved merges,"
1903 self.ui.status(_("There are unresolved merges,"
1904 " you can redo the full merge using:\n"
1904 " you can redo the full merge using:\n"
1905 " hg update -C %s\n"
1905 " hg update -C %s\n"
1906 " hg merge %s\n"
1906 " hg merge %s\n"
1907 % (self.changelog.rev(p1),
1907 % (self.changelog.rev(p1),
1908 self.changelog.rev(p2))))
1908 self.changelog.rev(p2))))
1909 else:
1909 else:
1910 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1910 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1911 elif failedmerge:
1911 elif failedmerge:
1912 self.ui.status(_("There are unresolved merges with"
1912 self.ui.status(_("There are unresolved merges with"
1913 " locally modified files.\n"))
1913 " locally modified files.\n"))
1914
1914
1915 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1915 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1916 return err
1916 return err
1917
1917
1918 def merge3(self, fn, my, other, p1, p2):
1918 def merge3(self, fn, my, other, p1, p2):
1919 """perform a 3-way merge in the working directory"""
1919 """perform a 3-way merge in the working directory"""
1920
1920
1921 def temp(prefix, node):
1921 def temp(prefix, node):
1922 pre = "%s~%s." % (os.path.basename(fn), prefix)
1922 pre = "%s~%s." % (os.path.basename(fn), prefix)
1923 (fd, name) = tempfile.mkstemp(prefix=pre)
1923 (fd, name) = tempfile.mkstemp(prefix=pre)
1924 f = os.fdopen(fd, "wb")
1924 f = os.fdopen(fd, "wb")
1925 self.wwrite(fn, fl.read(node), f)
1925 self.wwrite(fn, fl.read(node), f)
1926 f.close()
1926 f.close()
1927 return name
1927 return name
1928
1928
1929 fl = self.file(fn)
1929 fl = self.file(fn)
1930 base = fl.ancestor(my, other)
1930 base = fl.ancestor(my, other)
1931 a = self.wjoin(fn)
1931 a = self.wjoin(fn)
1932 b = temp("base", base)
1932 b = temp("base", base)
1933 c = temp("other", other)
1933 c = temp("other", other)
1934
1934
1935 self.ui.note(_("resolving %s\n") % fn)
1935 self.ui.note(_("resolving %s\n") % fn)
1936 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1936 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1937 (fn, short(my), short(other), short(base)))
1937 (fn, short(my), short(other), short(base)))
1938
1938
1939 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1939 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1940 or "hgmerge")
1940 or "hgmerge")
1941 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1941 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1942 environ={'HG_FILE': fn,
1942 environ={'HG_FILE': fn,
1943 'HG_MY_NODE': p1,
1943 'HG_MY_NODE': p1,
1944 'HG_OTHER_NODE': p2,
1944 'HG_OTHER_NODE': p2,
1945 'HG_FILE_MY_NODE': hex(my),
1945 'HG_FILE_MY_NODE': hex(my),
1946 'HG_FILE_OTHER_NODE': hex(other),
1946 'HG_FILE_OTHER_NODE': hex(other),
1947 'HG_FILE_BASE_NODE': hex(base)})
1947 'HG_FILE_BASE_NODE': hex(base)})
1948 if r:
1948 if r:
1949 self.ui.warn(_("merging %s failed!\n") % fn)
1949 self.ui.warn(_("merging %s failed!\n") % fn)
1950
1950
1951 os.unlink(b)
1951 os.unlink(b)
1952 os.unlink(c)
1952 os.unlink(c)
1953 return r
1953 return r
1954
1954
1955 def verify(self):
1955 def verify(self):
1956 filelinkrevs = {}
1956 filelinkrevs = {}
1957 filenodes = {}
1957 filenodes = {}
1958 changesets = revisions = files = 0
1958 changesets = revisions = files = 0
1959 errors = [0]
1959 errors = [0]
1960 warnings = [0]
1960 warnings = [0]
1961 neededmanifests = {}
1961 neededmanifests = {}
1962
1962
1963 def err(msg):
1963 def err(msg):
1964 self.ui.warn(msg + "\n")
1964 self.ui.warn(msg + "\n")
1965 errors[0] += 1
1965 errors[0] += 1
1966
1966
1967 def warn(msg):
1967 def warn(msg):
1968 self.ui.warn(msg + "\n")
1968 self.ui.warn(msg + "\n")
1969 warnings[0] += 1
1969 warnings[0] += 1
1970
1970
1971 def checksize(obj, name):
1971 def checksize(obj, name):
1972 d = obj.checksize()
1972 d = obj.checksize()
1973 if d[0]:
1973 if d[0]:
1974 err(_("%s data length off by %d bytes") % (name, d[0]))
1974 err(_("%s data length off by %d bytes") % (name, d[0]))
1975 if d[1]:
1975 if d[1]:
1976 err(_("%s index contains %d extra bytes") % (name, d[1]))
1976 err(_("%s index contains %d extra bytes") % (name, d[1]))
1977
1977
1978 def checkversion(obj, name):
1978 def checkversion(obj, name):
1979 if obj.version != revlog.REVLOGV0:
1979 if obj.version != revlog.REVLOGV0:
1980 if not revlogv1:
1980 if not revlogv1:
1981 warn(_("warning: `%s' uses revlog format 1") % name)
1981 warn(_("warning: `%s' uses revlog format 1") % name)
1982 elif revlogv1:
1982 elif revlogv1:
1983 warn(_("warning: `%s' uses revlog format 0") % name)
1983 warn(_("warning: `%s' uses revlog format 0") % name)
1984
1984
1985 revlogv1 = self.revlogversion != revlog.REVLOGV0
1985 revlogv1 = self.revlogversion != revlog.REVLOGV0
1986 if self.ui.verbose or revlogv1 != self.revlogv1:
1986 if self.ui.verbose or revlogv1 != self.revlogv1:
1987 self.ui.status(_("repository uses revlog format %d\n") %
1987 self.ui.status(_("repository uses revlog format %d\n") %
1988 (revlogv1 and 1 or 0))
1988 (revlogv1 and 1 or 0))
1989
1989
1990 seen = {}
1990 seen = {}
1991 self.ui.status(_("checking changesets\n"))
1991 self.ui.status(_("checking changesets\n"))
1992 checksize(self.changelog, "changelog")
1992 checksize(self.changelog, "changelog")
1993
1993
1994 for i in range(self.changelog.count()):
1994 for i in range(self.changelog.count()):
1995 changesets += 1
1995 changesets += 1
1996 n = self.changelog.node(i)
1996 n = self.changelog.node(i)
1997 l = self.changelog.linkrev(n)
1997 l = self.changelog.linkrev(n)
1998 if l != i:
1998 if l != i:
1999 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1999 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
2000 if n in seen:
2000 if n in seen:
2001 err(_("duplicate changeset at revision %d") % i)
2001 err(_("duplicate changeset at revision %d") % i)
2002 seen[n] = 1
2002 seen[n] = 1
2003
2003
2004 for p in self.changelog.parents(n):
2004 for p in self.changelog.parents(n):
2005 if p not in self.changelog.nodemap:
2005 if p not in self.changelog.nodemap:
2006 err(_("changeset %s has unknown parent %s") %
2006 err(_("changeset %s has unknown parent %s") %
2007 (short(n), short(p)))
2007 (short(n), short(p)))
2008 try:
2008 try:
2009 changes = self.changelog.read(n)
2009 changes = self.changelog.read(n)
2010 except KeyboardInterrupt:
2010 except KeyboardInterrupt:
2011 self.ui.warn(_("interrupted"))
2011 self.ui.warn(_("interrupted"))
2012 raise
2012 raise
2013 except Exception, inst:
2013 except Exception, inst:
2014 err(_("unpacking changeset %s: %s") % (short(n), inst))
2014 err(_("unpacking changeset %s: %s") % (short(n), inst))
2015 continue
2015 continue
2016
2016
2017 neededmanifests[changes[0]] = n
2017 neededmanifests[changes[0]] = n
2018
2018
2019 for f in changes[3]:
2019 for f in changes[3]:
2020 filelinkrevs.setdefault(f, []).append(i)
2020 filelinkrevs.setdefault(f, []).append(i)
2021
2021
2022 seen = {}
2022 seen = {}
2023 self.ui.status(_("checking manifests\n"))
2023 self.ui.status(_("checking manifests\n"))
2024 checkversion(self.manifest, "manifest")
2024 checkversion(self.manifest, "manifest")
2025 checksize(self.manifest, "manifest")
2025 checksize(self.manifest, "manifest")
2026
2026
2027 for i in range(self.manifest.count()):
2027 for i in range(self.manifest.count()):
2028 n = self.manifest.node(i)
2028 n = self.manifest.node(i)
2029 l = self.manifest.linkrev(n)
2029 l = self.manifest.linkrev(n)
2030
2030
2031 if l < 0 or l >= self.changelog.count():
2031 if l < 0 or l >= self.changelog.count():
2032 err(_("bad manifest link (%d) at revision %d") % (l, i))
2032 err(_("bad manifest link (%d) at revision %d") % (l, i))
2033
2033
2034 if n in neededmanifests:
2034 if n in neededmanifests:
2035 del neededmanifests[n]
2035 del neededmanifests[n]
2036
2036
2037 if n in seen:
2037 if n in seen:
2038 err(_("duplicate manifest at revision %d") % i)
2038 err(_("duplicate manifest at revision %d") % i)
2039
2039
2040 seen[n] = 1
2040 seen[n] = 1
2041
2041
2042 for p in self.manifest.parents(n):
2042 for p in self.manifest.parents(n):
2043 if p not in self.manifest.nodemap:
2043 if p not in self.manifest.nodemap:
2044 err(_("manifest %s has unknown parent %s") %
2044 err(_("manifest %s has unknown parent %s") %
2045 (short(n), short(p)))
2045 (short(n), short(p)))
2046
2046
2047 try:
2047 try:
2048 delta = mdiff.patchtext(self.manifest.delta(n))
2048 delta = mdiff.patchtext(self.manifest.delta(n))
2049 except KeyboardInterrupt:
2049 except KeyboardInterrupt:
2050 self.ui.warn(_("interrupted"))
2050 self.ui.warn(_("interrupted"))
2051 raise
2051 raise
2052 except Exception, inst:
2052 except Exception, inst:
2053 err(_("unpacking manifest %s: %s") % (short(n), inst))
2053 err(_("unpacking manifest %s: %s") % (short(n), inst))
2054 continue
2054 continue
2055
2055
2056 try:
2056 try:
2057 ff = [ l.split('\0') for l in delta.splitlines() ]
2057 ff = [ l.split('\0') for l in delta.splitlines() ]
2058 for f, fn in ff:
2058 for f, fn in ff:
2059 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2059 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2060 except (ValueError, TypeError), inst:
2060 except (ValueError, TypeError), inst:
2061 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2061 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2062
2062
2063 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2063 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2064
2064
2065 for m, c in neededmanifests.items():
2065 for m, c in neededmanifests.items():
2066 err(_("Changeset %s refers to unknown manifest %s") %
2066 err(_("Changeset %s refers to unknown manifest %s") %
2067 (short(m), short(c)))
2067 (short(m), short(c)))
2068 del neededmanifests
2068 del neededmanifests
2069
2069
2070 for f in filenodes:
2070 for f in filenodes:
2071 if f not in filelinkrevs:
2071 if f not in filelinkrevs:
2072 err(_("file %s in manifest but not in changesets") % f)
2072 err(_("file %s in manifest but not in changesets") % f)
2073
2073
2074 for f in filelinkrevs:
2074 for f in filelinkrevs:
2075 if f not in filenodes:
2075 if f not in filenodes:
2076 err(_("file %s in changeset but not in manifest") % f)
2076 err(_("file %s in changeset but not in manifest") % f)
2077
2077
2078 self.ui.status(_("checking files\n"))
2078 self.ui.status(_("checking files\n"))
2079 ff = filenodes.keys()
2079 ff = filenodes.keys()
2080 ff.sort()
2080 ff.sort()
2081 for f in ff:
2081 for f in ff:
2082 if f == "/dev/null":
2082 if f == "/dev/null":
2083 continue
2083 continue
2084 files += 1
2084 files += 1
2085 if not f:
2085 if not f:
2086 err(_("file without name in manifest %s") % short(n))
2086 err(_("file without name in manifest %s") % short(n))
2087 continue
2087 continue
2088 fl = self.file(f)
2088 fl = self.file(f)
2089 checkversion(fl, f)
2089 checkversion(fl, f)
2090 checksize(fl, f)
2090 checksize(fl, f)
2091
2091
2092 nodes = {nullid: 1}
2092 nodes = {nullid: 1}
2093 seen = {}
2093 seen = {}
2094 for i in range(fl.count()):
2094 for i in range(fl.count()):
2095 revisions += 1
2095 revisions += 1
2096 n = fl.node(i)
2096 n = fl.node(i)
2097
2097
2098 if n in seen:
2098 if n in seen:
2099 err(_("%s: duplicate revision %d") % (f, i))
2099 err(_("%s: duplicate revision %d") % (f, i))
2100 if n not in filenodes[f]:
2100 if n not in filenodes[f]:
2101 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2101 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2102 else:
2102 else:
2103 del filenodes[f][n]
2103 del filenodes[f][n]
2104
2104
2105 flr = fl.linkrev(n)
2105 flr = fl.linkrev(n)
2106 if flr not in filelinkrevs.get(f, []):
2106 if flr not in filelinkrevs.get(f, []):
2107 err(_("%s:%s points to unexpected changeset %d")
2107 err(_("%s:%s points to unexpected changeset %d")
2108 % (f, short(n), flr))
2108 % (f, short(n), flr))
2109 else:
2109 else:
2110 filelinkrevs[f].remove(flr)
2110 filelinkrevs[f].remove(flr)
2111
2111
2112 # verify contents
2112 # verify contents
2113 try:
2113 try:
2114 t = fl.read(n)
2114 t = fl.read(n)
2115 except KeyboardInterrupt:
2115 except KeyboardInterrupt:
2116 self.ui.warn(_("interrupted"))
2116 self.ui.warn(_("interrupted"))
2117 raise
2117 raise
2118 except Exception, inst:
2118 except Exception, inst:
2119 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2119 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2120
2120
2121 # verify parents
2121 # verify parents
2122 (p1, p2) = fl.parents(n)
2122 (p1, p2) = fl.parents(n)
2123 if p1 not in nodes:
2123 if p1 not in nodes:
2124 err(_("file %s:%s unknown parent 1 %s") %
2124 err(_("file %s:%s unknown parent 1 %s") %
2125 (f, short(n), short(p1)))
2125 (f, short(n), short(p1)))
2126 if p2 not in nodes:
2126 if p2 not in nodes:
2127 err(_("file %s:%s unknown parent 2 %s") %
2127 err(_("file %s:%s unknown parent 2 %s") %
2128 (f, short(n), short(p1)))
2128 (f, short(n), short(p1)))
2129 nodes[n] = 1
2129 nodes[n] = 1
2130
2130
2131 # cross-check
2131 # cross-check
2132 for node in filenodes[f]:
2132 for node in filenodes[f]:
2133 err(_("node %s in manifests not in %s") % (hex(node), f))
2133 err(_("node %s in manifests not in %s") % (hex(node), f))
2134
2134
2135 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2135 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2136 (files, changesets, revisions))
2136 (files, changesets, revisions))
2137
2137
2138 if warnings[0]:
2138 if warnings[0]:
2139 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2139 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2140 if errors[0]:
2140 if errors[0]:
2141 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2141 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2142 return 1
2142 return 1
2143
2143
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that finalizes a transaction's journal files.

    The callback renames ``journal`` -> ``undo`` and
    ``journal.dirstate`` -> ``undo.dirstate`` under the directory *base*,
    preserving the rollback data after a successful transaction.

    It is written as a plain closure over the path string (rather than a
    bound method) so that the transaction object does not hold a
    reference back to the repository, which would create a reference
    cycle and prevent destructors from running.
    """
    # NOTE: the visible diff rendering duplicated every line of this
    # function; this is the reconstructed single copy.
    p = base
    def a():
        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
        util.rename(os.path.join(p, "journal.dirstate"),
                    os.path.join(p, "undo.dirstate"))
    return a
2152
2152
General Comments 0
You need to be logged in to leave comments. Login now