##// END OF EJS Templates
fix parsing of tags. make parse errors useful. add new tag tests....
Vadim Gelfer -
r2320:dbdce3b9 default
parent child Browse files
Show More
@@ -1,2102 +1,2109
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog traceback")
15 demandload(globals(), "revlog traceback")
16
16
17 class localrepository(object):
17 class localrepository(object):
18 def __del__(self):
18 def __del__(self):
19 self.transhandle = None
19 self.transhandle = None
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 if not path:
21 if not path:
22 p = os.getcwd()
22 p = os.getcwd()
23 while not os.path.isdir(os.path.join(p, ".hg")):
23 while not os.path.isdir(os.path.join(p, ".hg")):
24 oldp = p
24 oldp = p
25 p = os.path.dirname(p)
25 p = os.path.dirname(p)
26 if p == oldp:
26 if p == oldp:
27 raise repo.RepoError(_("no repo found"))
27 raise repo.RepoError(_("no repo found"))
28 path = p
28 path = p
29 self.path = os.path.join(path, ".hg")
29 self.path = os.path.join(path, ".hg")
30
30
31 if not create and not os.path.isdir(self.path):
31 if not create and not os.path.isdir(self.path):
32 raise repo.RepoError(_("repository %s not found") % path)
32 raise repo.RepoError(_("repository %s not found") % path)
33
33
34 self.root = os.path.abspath(path)
34 self.root = os.path.abspath(path)
35 self.origroot = path
35 self.origroot = path
36 self.ui = ui.ui(parentui=parentui)
36 self.ui = ui.ui(parentui=parentui)
37 self.opener = util.opener(self.path)
37 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
38 self.wopener = util.opener(self.root)
39
39
40 try:
40 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 v = self.ui.revlogopts
45 v = self.ui.revlogopts
46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 fl = v.get('flags', None)
48 fl = v.get('flags', None)
49 flags = 0
49 flags = 0
50 if fl != None:
50 if fl != None:
51 for x in fl.split():
51 for x in fl.split():
52 flags |= revlog.flagstr(x)
52 flags |= revlog.flagstr(x)
53 elif self.revlogv1:
53 elif self.revlogv1:
54 flags = revlog.REVLOG_DEFAULT_FLAGS
54 flags = revlog.REVLOG_DEFAULT_FLAGS
55
55
56 v = self.revlogversion | flags
56 v = self.revlogversion | flags
57 self.manifest = manifest.manifest(self.opener, v)
57 self.manifest = manifest.manifest(self.opener, v)
58 self.changelog = changelog.changelog(self.opener, v)
58 self.changelog = changelog.changelog(self.opener, v)
59
59
60 # the changelog might not have the inline index flag
60 # the changelog might not have the inline index flag
61 # on. If the format of the changelog is the same as found in
61 # on. If the format of the changelog is the same as found in
62 # .hgrc, apply any flags found in the .hgrc as well.
62 # .hgrc, apply any flags found in the .hgrc as well.
63 # Otherwise, just version from the changelog
63 # Otherwise, just version from the changelog
64 v = self.changelog.version
64 v = self.changelog.version
65 if v == self.revlogversion:
65 if v == self.revlogversion:
66 v |= flags
66 v |= flags
67 self.revlogversion = v
67 self.revlogversion = v
68
68
69 self.tagscache = None
69 self.tagscache = None
70 self.nodetagscache = None
70 self.nodetagscache = None
71 self.encodepats = None
71 self.encodepats = None
72 self.decodepats = None
72 self.decodepats = None
73 self.transhandle = None
73 self.transhandle = None
74
74
75 if create:
75 if create:
76 os.mkdir(self.path)
76 os.mkdir(self.path)
77 os.mkdir(self.join("data"))
77 os.mkdir(self.join("data"))
78
78
79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80
80
81 def hook(self, name, throw=False, **args):
81 def hook(self, name, throw=False, **args):
82 def callhook(hname, funcname):
82 def callhook(hname, funcname):
83 '''call python hook. hook is callable object, looked up as
83 '''call python hook. hook is callable object, looked up as
84 name in python module. if callable returns "true", hook
84 name in python module. if callable returns "true", hook
85 fails, else passes. if hook raises exception, treated as
85 fails, else passes. if hook raises exception, treated as
86 hook failure. exception propagates if throw is "true".
86 hook failure. exception propagates if throw is "true".
87
87
88 reason for "true" meaning "hook failed" is so that
88 reason for "true" meaning "hook failed" is so that
89 unmodified commands (e.g. mercurial.commands.update) can
89 unmodified commands (e.g. mercurial.commands.update) can
90 be run as hooks without wrappers to convert return values.'''
90 be run as hooks without wrappers to convert return values.'''
91
91
92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
93 d = funcname.rfind('.')
93 d = funcname.rfind('.')
94 if d == -1:
94 if d == -1:
95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
96 % (hname, funcname))
96 % (hname, funcname))
97 modname = funcname[:d]
97 modname = funcname[:d]
98 try:
98 try:
99 obj = __import__(modname)
99 obj = __import__(modname)
100 except ImportError:
100 except ImportError:
101 raise util.Abort(_('%s hook is invalid '
101 raise util.Abort(_('%s hook is invalid '
102 '(import of "%s" failed)') %
102 '(import of "%s" failed)') %
103 (hname, modname))
103 (hname, modname))
104 try:
104 try:
105 for p in funcname.split('.')[1:]:
105 for p in funcname.split('.')[1:]:
106 obj = getattr(obj, p)
106 obj = getattr(obj, p)
107 except AttributeError, err:
107 except AttributeError, err:
108 raise util.Abort(_('%s hook is invalid '
108 raise util.Abort(_('%s hook is invalid '
109 '("%s" is not defined)') %
109 '("%s" is not defined)') %
110 (hname, funcname))
110 (hname, funcname))
111 if not callable(obj):
111 if not callable(obj):
112 raise util.Abort(_('%s hook is invalid '
112 raise util.Abort(_('%s hook is invalid '
113 '("%s" is not callable)') %
113 '("%s" is not callable)') %
114 (hname, funcname))
114 (hname, funcname))
115 try:
115 try:
116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
117 except (KeyboardInterrupt, util.SignalInterrupt):
117 except (KeyboardInterrupt, util.SignalInterrupt):
118 raise
118 raise
119 except Exception, exc:
119 except Exception, exc:
120 if isinstance(exc, util.Abort):
120 if isinstance(exc, util.Abort):
121 self.ui.warn(_('error: %s hook failed: %s\n') %
121 self.ui.warn(_('error: %s hook failed: %s\n') %
122 (hname, exc.args[0] % exc.args[1:]))
122 (hname, exc.args[0] % exc.args[1:]))
123 else:
123 else:
124 self.ui.warn(_('error: %s hook raised an exception: '
124 self.ui.warn(_('error: %s hook raised an exception: '
125 '%s\n') % (hname, exc))
125 '%s\n') % (hname, exc))
126 if throw:
126 if throw:
127 raise
127 raise
128 if self.ui.traceback:
128 if self.ui.traceback:
129 traceback.print_exc()
129 traceback.print_exc()
130 return True
130 return True
131 if r:
131 if r:
132 if throw:
132 if throw:
133 raise util.Abort(_('%s hook failed') % hname)
133 raise util.Abort(_('%s hook failed') % hname)
134 self.ui.warn(_('warning: %s hook failed\n') % hname)
134 self.ui.warn(_('warning: %s hook failed\n') % hname)
135 return r
135 return r
136
136
137 def runhook(name, cmd):
137 def runhook(name, cmd):
138 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
138 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
139 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
139 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
140 r = util.system(cmd, environ=env, cwd=self.root)
140 r = util.system(cmd, environ=env, cwd=self.root)
141 if r:
141 if r:
142 desc, r = util.explain_exit(r)
142 desc, r = util.explain_exit(r)
143 if throw:
143 if throw:
144 raise util.Abort(_('%s hook %s') % (name, desc))
144 raise util.Abort(_('%s hook %s') % (name, desc))
145 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
145 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
146 return r
146 return r
147
147
148 r = False
148 r = False
149 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
149 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
150 if hname.split(".", 1)[0] == name and cmd]
150 if hname.split(".", 1)[0] == name and cmd]
151 hooks.sort()
151 hooks.sort()
152 for hname, cmd in hooks:
152 for hname, cmd in hooks:
153 if cmd.startswith('python:'):
153 if cmd.startswith('python:'):
154 r = callhook(hname, cmd[7:].strip()) or r
154 r = callhook(hname, cmd[7:].strip()) or r
155 else:
155 else:
156 r = runhook(hname, cmd) or r
156 r = runhook(hname, cmd) or r
157 return r
157 return r
158
158
159 def tags(self):
159 def tags(self):
160 '''return a mapping of tag to node'''
160 '''return a mapping of tag to node'''
161 if not self.tagscache:
161 if not self.tagscache:
162 self.tagscache = {}
162 self.tagscache = {}
163
163
164 def parsetag(line, context):
164 def parsetag(line, context):
165 if not line:
165 if not line:
166 return
166 return
167 s = l.split(" ", 1)
167 s = l.split(" ", 1)
168 if len(s) != 2:
168 if len(s) != 2:
169 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 return
170 return
171 node, key = s
171 node, key = s
172 key = key.strip()
172 try:
173 try:
173 bin_n = bin(node)
174 bin_n = bin(node)
174 except TypeError:
175 except TypeError:
175 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 (context, node))
176 return
178 return
177 if bin_n not in self.changelog.nodemap:
179 if bin_n not in self.changelog.nodemap:
178 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 (context, key))
179 return
182 return
180 self.tagscache[key.strip()] = bin_n
183 self.tagscache[key] = bin_n
181
184
182 # read each head of the tags file, ending with the tip
185 # read the tags file from each head, ending with the tip,
183 # and add each tag found to the map, with "newer" ones
186 # and add each tag found to the map, with "newer" ones
184 # taking precedence
187 # taking precedence
188 heads = self.heads()
189 heads.reverse()
185 fl = self.file(".hgtags")
190 fl = self.file(".hgtags")
186 h = fl.heads()
191 for node in heads:
187 h.reverse()
192 change = self.changelog.read(node)
188 for r in h:
193 rev = self.changelog.rev(node)
194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 if fn is None: continue
189 count = 0
196 count = 0
190 for l in fl.read(r).splitlines():
197 for l in fl.read(fn).splitlines():
191 count += 1
198 count += 1
192 parsetag(l, ".hgtags:%d" % count)
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
193
200 (rev, short(node), count))
194 try:
201 try:
195 f = self.opener("localtags")
202 f = self.opener("localtags")
196 count = 0
203 count = 0
197 for l in f:
204 for l in f:
198 count += 1
205 count += 1
199 parsetag(l, "localtags:%d" % count)
206 parsetag(l, _("localtags, line %d") % count)
200 except IOError:
207 except IOError:
201 pass
208 pass
202
209
203 self.tagscache['tip'] = self.changelog.tip()
210 self.tagscache['tip'] = self.changelog.tip()
204
211
205 return self.tagscache
212 return self.tagscache
206
213
207 def tagslist(self):
214 def tagslist(self):
208 '''return a list of tags ordered by revision'''
215 '''return a list of tags ordered by revision'''
209 l = []
216 l = []
210 for t, n in self.tags().items():
217 for t, n in self.tags().items():
211 try:
218 try:
212 r = self.changelog.rev(n)
219 r = self.changelog.rev(n)
213 except:
220 except:
214 r = -2 # sort to the beginning of the list if unknown
221 r = -2 # sort to the beginning of the list if unknown
215 l.append((r, t, n))
222 l.append((r, t, n))
216 l.sort()
223 l.sort()
217 return [(t, n) for r, t, n in l]
224 return [(t, n) for r, t, n in l]
218
225
219 def nodetags(self, node):
226 def nodetags(self, node):
220 '''return the tags associated with a node'''
227 '''return the tags associated with a node'''
221 if not self.nodetagscache:
228 if not self.nodetagscache:
222 self.nodetagscache = {}
229 self.nodetagscache = {}
223 for t, n in self.tags().items():
230 for t, n in self.tags().items():
224 self.nodetagscache.setdefault(n, []).append(t)
231 self.nodetagscache.setdefault(n, []).append(t)
225 return self.nodetagscache.get(node, [])
232 return self.nodetagscache.get(node, [])
226
233
227 def lookup(self, key):
234 def lookup(self, key):
228 try:
235 try:
229 return self.tags()[key]
236 return self.tags()[key]
230 except KeyError:
237 except KeyError:
231 try:
238 try:
232 return self.changelog.lookup(key)
239 return self.changelog.lookup(key)
233 except:
240 except:
234 raise repo.RepoError(_("unknown revision '%s'") % key)
241 raise repo.RepoError(_("unknown revision '%s'") % key)
235
242
236 def dev(self):
243 def dev(self):
237 return os.stat(self.path).st_dev
244 return os.stat(self.path).st_dev
238
245
239 def local(self):
246 def local(self):
240 return True
247 return True
241
248
242 def join(self, f):
249 def join(self, f):
243 return os.path.join(self.path, f)
250 return os.path.join(self.path, f)
244
251
245 def wjoin(self, f):
252 def wjoin(self, f):
246 return os.path.join(self.root, f)
253 return os.path.join(self.root, f)
247
254
248 def file(self, f):
255 def file(self, f):
249 if f[0] == '/':
256 if f[0] == '/':
250 f = f[1:]
257 f = f[1:]
251 return filelog.filelog(self.opener, f, self.revlogversion)
258 return filelog.filelog(self.opener, f, self.revlogversion)
252
259
253 def getcwd(self):
260 def getcwd(self):
254 return self.dirstate.getcwd()
261 return self.dirstate.getcwd()
255
262
256 def wfile(self, f, mode='r'):
263 def wfile(self, f, mode='r'):
257 return self.wopener(f, mode)
264 return self.wopener(f, mode)
258
265
259 def wread(self, filename):
266 def wread(self, filename):
260 if self.encodepats == None:
267 if self.encodepats == None:
261 l = []
268 l = []
262 for pat, cmd in self.ui.configitems("encode"):
269 for pat, cmd in self.ui.configitems("encode"):
263 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 mf = util.matcher(self.root, "", [pat], [], [])[1]
264 l.append((mf, cmd))
271 l.append((mf, cmd))
265 self.encodepats = l
272 self.encodepats = l
266
273
267 data = self.wopener(filename, 'r').read()
274 data = self.wopener(filename, 'r').read()
268
275
269 for mf, cmd in self.encodepats:
276 for mf, cmd in self.encodepats:
270 if mf(filename):
277 if mf(filename):
271 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
272 data = util.filter(data, cmd)
279 data = util.filter(data, cmd)
273 break
280 break
274
281
275 return data
282 return data
276
283
277 def wwrite(self, filename, data, fd=None):
284 def wwrite(self, filename, data, fd=None):
278 if self.decodepats == None:
285 if self.decodepats == None:
279 l = []
286 l = []
280 for pat, cmd in self.ui.configitems("decode"):
287 for pat, cmd in self.ui.configitems("decode"):
281 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 mf = util.matcher(self.root, "", [pat], [], [])[1]
282 l.append((mf, cmd))
289 l.append((mf, cmd))
283 self.decodepats = l
290 self.decodepats = l
284
291
285 for mf, cmd in self.decodepats:
292 for mf, cmd in self.decodepats:
286 if mf(filename):
293 if mf(filename):
287 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
288 data = util.filter(data, cmd)
295 data = util.filter(data, cmd)
289 break
296 break
290
297
291 if fd:
298 if fd:
292 return fd.write(data)
299 return fd.write(data)
293 return self.wopener(filename, 'w').write(data)
300 return self.wopener(filename, 'w').write(data)
294
301
295 def transaction(self):
302 def transaction(self):
296 tr = self.transhandle
303 tr = self.transhandle
297 if tr != None and tr.running():
304 if tr != None and tr.running():
298 return tr.nest()
305 return tr.nest()
299
306
300 # save dirstate for undo
307 # save dirstate for undo
301 try:
308 try:
302 ds = self.opener("dirstate").read()
309 ds = self.opener("dirstate").read()
303 except IOError:
310 except IOError:
304 ds = ""
311 ds = ""
305 self.opener("journal.dirstate", "w").write(ds)
312 self.opener("journal.dirstate", "w").write(ds)
306
313
307 tr = transaction.transaction(self.ui.warn, self.opener,
314 tr = transaction.transaction(self.ui.warn, self.opener,
308 self.join("journal"),
315 self.join("journal"),
309 aftertrans(self.path))
316 aftertrans(self.path))
310 self.transhandle = tr
317 self.transhandle = tr
311 return tr
318 return tr
312
319
313 def recover(self):
320 def recover(self):
314 l = self.lock()
321 l = self.lock()
315 if os.path.exists(self.join("journal")):
322 if os.path.exists(self.join("journal")):
316 self.ui.status(_("rolling back interrupted transaction\n"))
323 self.ui.status(_("rolling back interrupted transaction\n"))
317 transaction.rollback(self.opener, self.join("journal"))
324 transaction.rollback(self.opener, self.join("journal"))
318 self.reload()
325 self.reload()
319 return True
326 return True
320 else:
327 else:
321 self.ui.warn(_("no interrupted transaction available\n"))
328 self.ui.warn(_("no interrupted transaction available\n"))
322 return False
329 return False
323
330
324 def undo(self, wlock=None):
331 def undo(self, wlock=None):
325 if not wlock:
332 if not wlock:
326 wlock = self.wlock()
333 wlock = self.wlock()
327 l = self.lock()
334 l = self.lock()
328 if os.path.exists(self.join("undo")):
335 if os.path.exists(self.join("undo")):
329 self.ui.status(_("rolling back last transaction\n"))
336 self.ui.status(_("rolling back last transaction\n"))
330 transaction.rollback(self.opener, self.join("undo"))
337 transaction.rollback(self.opener, self.join("undo"))
331 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
332 self.reload()
339 self.reload()
333 self.wreload()
340 self.wreload()
334 else:
341 else:
335 self.ui.warn(_("no undo information available\n"))
342 self.ui.warn(_("no undo information available\n"))
336
343
337 def wreload(self):
344 def wreload(self):
338 self.dirstate.read()
345 self.dirstate.read()
339
346
340 def reload(self):
347 def reload(self):
341 self.changelog.load()
348 self.changelog.load()
342 self.manifest.load()
349 self.manifest.load()
343 self.tagscache = None
350 self.tagscache = None
344 self.nodetagscache = None
351 self.nodetagscache = None
345
352
346 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
353 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
347 desc=None):
354 desc=None):
348 try:
355 try:
349 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
356 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
350 except lock.LockHeld, inst:
357 except lock.LockHeld, inst:
351 if not wait:
358 if not wait:
352 raise
359 raise
353 self.ui.warn(_("waiting for lock on %s held by %s\n") %
360 self.ui.warn(_("waiting for lock on %s held by %s\n") %
354 (desc, inst.args[0]))
361 (desc, inst.args[0]))
355 # default to 600 seconds timeout
362 # default to 600 seconds timeout
356 l = lock.lock(self.join(lockname),
363 l = lock.lock(self.join(lockname),
357 int(self.ui.config("ui", "timeout") or 600),
364 int(self.ui.config("ui", "timeout") or 600),
358 releasefn, desc=desc)
365 releasefn, desc=desc)
359 if acquirefn:
366 if acquirefn:
360 acquirefn()
367 acquirefn()
361 return l
368 return l
362
369
363 def lock(self, wait=1):
370 def lock(self, wait=1):
364 return self.do_lock("lock", wait, acquirefn=self.reload,
371 return self.do_lock("lock", wait, acquirefn=self.reload,
365 desc=_('repository %s') % self.origroot)
372 desc=_('repository %s') % self.origroot)
366
373
367 def wlock(self, wait=1):
374 def wlock(self, wait=1):
368 return self.do_lock("wlock", wait, self.dirstate.write,
375 return self.do_lock("wlock", wait, self.dirstate.write,
369 self.wreload,
376 self.wreload,
370 desc=_('working directory of %s') % self.origroot)
377 desc=_('working directory of %s') % self.origroot)
371
378
372 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
379 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
373 "determine whether a new filenode is needed"
380 "determine whether a new filenode is needed"
374 fp1 = manifest1.get(filename, nullid)
381 fp1 = manifest1.get(filename, nullid)
375 fp2 = manifest2.get(filename, nullid)
382 fp2 = manifest2.get(filename, nullid)
376
383
377 if fp2 != nullid:
384 if fp2 != nullid:
378 # is one parent an ancestor of the other?
385 # is one parent an ancestor of the other?
379 fpa = filelog.ancestor(fp1, fp2)
386 fpa = filelog.ancestor(fp1, fp2)
380 if fpa == fp1:
387 if fpa == fp1:
381 fp1, fp2 = fp2, nullid
388 fp1, fp2 = fp2, nullid
382 elif fpa == fp2:
389 elif fpa == fp2:
383 fp2 = nullid
390 fp2 = nullid
384
391
385 # is the file unmodified from the parent? report existing entry
392 # is the file unmodified from the parent? report existing entry
386 if fp2 == nullid and text == filelog.read(fp1):
393 if fp2 == nullid and text == filelog.read(fp1):
387 return (fp1, None, None)
394 return (fp1, None, None)
388
395
389 return (None, fp1, fp2)
396 return (None, fp1, fp2)
390
397
391 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
398 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
392 orig_parent = self.dirstate.parents()[0] or nullid
399 orig_parent = self.dirstate.parents()[0] or nullid
393 p1 = p1 or self.dirstate.parents()[0] or nullid
400 p1 = p1 or self.dirstate.parents()[0] or nullid
394 p2 = p2 or self.dirstate.parents()[1] or nullid
401 p2 = p2 or self.dirstate.parents()[1] or nullid
395 c1 = self.changelog.read(p1)
402 c1 = self.changelog.read(p1)
396 c2 = self.changelog.read(p2)
403 c2 = self.changelog.read(p2)
397 m1 = self.manifest.read(c1[0])
404 m1 = self.manifest.read(c1[0])
398 mf1 = self.manifest.readflags(c1[0])
405 mf1 = self.manifest.readflags(c1[0])
399 m2 = self.manifest.read(c2[0])
406 m2 = self.manifest.read(c2[0])
400 changed = []
407 changed = []
401
408
402 if orig_parent == p1:
409 if orig_parent == p1:
403 update_dirstate = 1
410 update_dirstate = 1
404 else:
411 else:
405 update_dirstate = 0
412 update_dirstate = 0
406
413
407 if not wlock:
414 if not wlock:
408 wlock = self.wlock()
415 wlock = self.wlock()
409 l = self.lock()
416 l = self.lock()
410 tr = self.transaction()
417 tr = self.transaction()
411 mm = m1.copy()
418 mm = m1.copy()
412 mfm = mf1.copy()
419 mfm = mf1.copy()
413 linkrev = self.changelog.count()
420 linkrev = self.changelog.count()
414 for f in files:
421 for f in files:
415 try:
422 try:
416 t = self.wread(f)
423 t = self.wread(f)
417 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
424 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
418 r = self.file(f)
425 r = self.file(f)
419 mfm[f] = tm
426 mfm[f] = tm
420
427
421 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
428 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
422 if entry:
429 if entry:
423 mm[f] = entry
430 mm[f] = entry
424 continue
431 continue
425
432
426 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
433 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
427 changed.append(f)
434 changed.append(f)
428 if update_dirstate:
435 if update_dirstate:
429 self.dirstate.update([f], "n")
436 self.dirstate.update([f], "n")
430 except IOError:
437 except IOError:
431 try:
438 try:
432 del mm[f]
439 del mm[f]
433 del mfm[f]
440 del mfm[f]
434 if update_dirstate:
441 if update_dirstate:
435 self.dirstate.forget([f])
442 self.dirstate.forget([f])
436 except:
443 except:
437 # deleted from p2?
444 # deleted from p2?
438 pass
445 pass
439
446
440 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
447 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
441 user = user or self.ui.username()
448 user = user or self.ui.username()
442 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
449 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
443 tr.close()
450 tr.close()
444 if update_dirstate:
451 if update_dirstate:
445 self.dirstate.setparents(n, nullid)
452 self.dirstate.setparents(n, nullid)
446
453
447 def commit(self, files=None, text="", user=None, date=None,
454 def commit(self, files=None, text="", user=None, date=None,
448 match=util.always, force=False, lock=None, wlock=None,
455 match=util.always, force=False, lock=None, wlock=None,
449 force_editor=False):
456 force_editor=False):
450 commit = []
457 commit = []
451 remove = []
458 remove = []
452 changed = []
459 changed = []
453
460
454 if files:
461 if files:
455 for f in files:
462 for f in files:
456 s = self.dirstate.state(f)
463 s = self.dirstate.state(f)
457 if s in 'nmai':
464 if s in 'nmai':
458 commit.append(f)
465 commit.append(f)
459 elif s == 'r':
466 elif s == 'r':
460 remove.append(f)
467 remove.append(f)
461 else:
468 else:
462 self.ui.warn(_("%s not tracked!\n") % f)
469 self.ui.warn(_("%s not tracked!\n") % f)
463 else:
470 else:
464 modified, added, removed, deleted, unknown = self.changes(match=match)
471 modified, added, removed, deleted, unknown = self.changes(match=match)
465 commit = modified + added
472 commit = modified + added
466 remove = removed
473 remove = removed
467
474
468 p1, p2 = self.dirstate.parents()
475 p1, p2 = self.dirstate.parents()
469 c1 = self.changelog.read(p1)
476 c1 = self.changelog.read(p1)
470 c2 = self.changelog.read(p2)
477 c2 = self.changelog.read(p2)
471 m1 = self.manifest.read(c1[0])
478 m1 = self.manifest.read(c1[0])
472 mf1 = self.manifest.readflags(c1[0])
479 mf1 = self.manifest.readflags(c1[0])
473 m2 = self.manifest.read(c2[0])
480 m2 = self.manifest.read(c2[0])
474
481
475 if not commit and not remove and not force and p2 == nullid:
482 if not commit and not remove and not force and p2 == nullid:
476 self.ui.status(_("nothing changed\n"))
483 self.ui.status(_("nothing changed\n"))
477 return None
484 return None
478
485
479 xp1 = hex(p1)
486 xp1 = hex(p1)
480 if p2 == nullid: xp2 = ''
487 if p2 == nullid: xp2 = ''
481 else: xp2 = hex(p2)
488 else: xp2 = hex(p2)
482
489
483 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
490 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
484
491
485 if not wlock:
492 if not wlock:
486 wlock = self.wlock()
493 wlock = self.wlock()
487 if not lock:
494 if not lock:
488 lock = self.lock()
495 lock = self.lock()
489 tr = self.transaction()
496 tr = self.transaction()
490
497
491 # check in files
498 # check in files
492 new = {}
499 new = {}
493 linkrev = self.changelog.count()
500 linkrev = self.changelog.count()
494 commit.sort()
501 commit.sort()
495 for f in commit:
502 for f in commit:
496 self.ui.note(f + "\n")
503 self.ui.note(f + "\n")
497 try:
504 try:
498 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
505 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
499 t = self.wread(f)
506 t = self.wread(f)
500 except IOError:
507 except IOError:
501 self.ui.warn(_("trouble committing %s!\n") % f)
508 self.ui.warn(_("trouble committing %s!\n") % f)
502 raise
509 raise
503
510
504 r = self.file(f)
511 r = self.file(f)
505
512
506 meta = {}
513 meta = {}
507 cp = self.dirstate.copied(f)
514 cp = self.dirstate.copied(f)
508 if cp:
515 if cp:
509 meta["copy"] = cp
516 meta["copy"] = cp
510 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
517 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
511 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
518 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
512 fp1, fp2 = nullid, nullid
519 fp1, fp2 = nullid, nullid
513 else:
520 else:
514 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
521 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
515 if entry:
522 if entry:
516 new[f] = entry
523 new[f] = entry
517 continue
524 continue
518
525
519 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
526 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
520 # remember what we've added so that we can later calculate
527 # remember what we've added so that we can later calculate
521 # the files to pull from a set of changesets
528 # the files to pull from a set of changesets
522 changed.append(f)
529 changed.append(f)
523
530
524 # update manifest
531 # update manifest
525 m1 = m1.copy()
532 m1 = m1.copy()
526 m1.update(new)
533 m1.update(new)
527 for f in remove:
534 for f in remove:
528 if f in m1:
535 if f in m1:
529 del m1[f]
536 del m1[f]
530 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
537 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
531 (new, remove))
538 (new, remove))
532
539
533 # add changeset
540 # add changeset
534 new = new.keys()
541 new = new.keys()
535 new.sort()
542 new.sort()
536
543
537 user = user or self.ui.username()
544 user = user or self.ui.username()
538 if not text or force_editor:
545 if not text or force_editor:
539 edittext = []
546 edittext = []
540 if text:
547 if text:
541 edittext.append(text)
548 edittext.append(text)
542 edittext.append("")
549 edittext.append("")
543 if p2 != nullid:
550 if p2 != nullid:
544 edittext.append("HG: branch merge")
551 edittext.append("HG: branch merge")
545 edittext.extend(["HG: changed %s" % f for f in changed])
552 edittext.extend(["HG: changed %s" % f for f in changed])
546 edittext.extend(["HG: removed %s" % f for f in remove])
553 edittext.extend(["HG: removed %s" % f for f in remove])
547 if not changed and not remove:
554 if not changed and not remove:
548 edittext.append("HG: no files changed")
555 edittext.append("HG: no files changed")
549 edittext.append("")
556 edittext.append("")
550 # run editor in the repository root
557 # run editor in the repository root
551 olddir = os.getcwd()
558 olddir = os.getcwd()
552 os.chdir(self.root)
559 os.chdir(self.root)
553 text = self.ui.edit("\n".join(edittext), user)
560 text = self.ui.edit("\n".join(edittext), user)
554 os.chdir(olddir)
561 os.chdir(olddir)
555
562
556 lines = [line.rstrip() for line in text.rstrip().splitlines()]
563 lines = [line.rstrip() for line in text.rstrip().splitlines()]
557 while lines and not lines[0]:
564 while lines and not lines[0]:
558 del lines[0]
565 del lines[0]
559 if not lines:
566 if not lines:
560 return None
567 return None
561 text = '\n'.join(lines)
568 text = '\n'.join(lines)
562 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
569 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
563 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
570 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
564 parent2=xp2)
571 parent2=xp2)
565 tr.close()
572 tr.close()
566
573
567 self.dirstate.setparents(n)
574 self.dirstate.setparents(n)
568 self.dirstate.update(new, "n")
575 self.dirstate.update(new, "n")
569 self.dirstate.forget(remove)
576 self.dirstate.forget(remove)
570
577
571 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
578 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
572 return n
579 return n
573
580
574 def walk(self, node=None, files=[], match=util.always, badmatch=None):
581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
575 if node:
582 if node:
576 fdict = dict.fromkeys(files)
583 fdict = dict.fromkeys(files)
577 for fn in self.manifest.read(self.changelog.read(node)[0]):
584 for fn in self.manifest.read(self.changelog.read(node)[0]):
578 fdict.pop(fn, None)
585 fdict.pop(fn, None)
579 if match(fn):
586 if match(fn):
580 yield 'm', fn
587 yield 'm', fn
581 for fn in fdict:
588 for fn in fdict:
582 if badmatch and badmatch(fn):
589 if badmatch and badmatch(fn):
583 if match(fn):
590 if match(fn):
584 yield 'b', fn
591 yield 'b', fn
585 else:
592 else:
586 self.ui.warn(_('%s: No such file in rev %s\n') % (
593 self.ui.warn(_('%s: No such file in rev %s\n') % (
587 util.pathto(self.getcwd(), fn), short(node)))
594 util.pathto(self.getcwd(), fn), short(node)))
588 else:
595 else:
589 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
590 yield src, fn
597 yield src, fn
591
598
592 def changes(self, node1=None, node2=None, files=[], match=util.always,
599 def changes(self, node1=None, node2=None, files=[], match=util.always,
593 wlock=None, show_ignored=None):
600 wlock=None, show_ignored=None):
594 """return changes between two nodes or node and working directory
601 """return changes between two nodes or node and working directory
595
602
596 If node1 is None, use the first dirstate parent instead.
603 If node1 is None, use the first dirstate parent instead.
597 If node2 is None, compare node1 with working directory.
604 If node2 is None, compare node1 with working directory.
598 """
605 """
599
606
600 def fcmp(fn, mf):
607 def fcmp(fn, mf):
601 t1 = self.wread(fn)
608 t1 = self.wread(fn)
602 t2 = self.file(fn).read(mf.get(fn, nullid))
609 t2 = self.file(fn).read(mf.get(fn, nullid))
603 return cmp(t1, t2)
610 return cmp(t1, t2)
604
611
605 def mfmatches(node):
612 def mfmatches(node):
606 change = self.changelog.read(node)
613 change = self.changelog.read(node)
607 mf = dict(self.manifest.read(change[0]))
614 mf = dict(self.manifest.read(change[0]))
608 for fn in mf.keys():
615 for fn in mf.keys():
609 if not match(fn):
616 if not match(fn):
610 del mf[fn]
617 del mf[fn]
611 return mf
618 return mf
612
619
613 if node1:
620 if node1:
614 # read the manifest from node1 before the manifest from node2,
621 # read the manifest from node1 before the manifest from node2,
615 # so that we'll hit the manifest cache if we're going through
622 # so that we'll hit the manifest cache if we're going through
616 # all the revisions in parent->child order.
623 # all the revisions in parent->child order.
617 mf1 = mfmatches(node1)
624 mf1 = mfmatches(node1)
618
625
619 # are we comparing the working directory?
626 # are we comparing the working directory?
620 if not node2:
627 if not node2:
621 if not wlock:
628 if not wlock:
622 try:
629 try:
623 wlock = self.wlock(wait=0)
630 wlock = self.wlock(wait=0)
624 except lock.LockException:
631 except lock.LockException:
625 wlock = None
632 wlock = None
626 lookup, modified, added, removed, deleted, unknown, ignored = (
633 lookup, modified, added, removed, deleted, unknown, ignored = (
627 self.dirstate.changes(files, match, show_ignored))
634 self.dirstate.changes(files, match, show_ignored))
628
635
629 # are we comparing working dir against its parent?
636 # are we comparing working dir against its parent?
630 if not node1:
637 if not node1:
631 if lookup:
638 if lookup:
632 # do a full compare of any files that might have changed
639 # do a full compare of any files that might have changed
633 mf2 = mfmatches(self.dirstate.parents()[0])
640 mf2 = mfmatches(self.dirstate.parents()[0])
634 for f in lookup:
641 for f in lookup:
635 if fcmp(f, mf2):
642 if fcmp(f, mf2):
636 modified.append(f)
643 modified.append(f)
637 elif wlock is not None:
644 elif wlock is not None:
638 self.dirstate.update([f], "n")
645 self.dirstate.update([f], "n")
639 else:
646 else:
640 # we are comparing working dir against non-parent
647 # we are comparing working dir against non-parent
641 # generate a pseudo-manifest for the working dir
648 # generate a pseudo-manifest for the working dir
642 mf2 = mfmatches(self.dirstate.parents()[0])
649 mf2 = mfmatches(self.dirstate.parents()[0])
643 for f in lookup + modified + added:
650 for f in lookup + modified + added:
644 mf2[f] = ""
651 mf2[f] = ""
645 for f in removed:
652 for f in removed:
646 if f in mf2:
653 if f in mf2:
647 del mf2[f]
654 del mf2[f]
648 else:
655 else:
649 # we are comparing two revisions
656 # we are comparing two revisions
650 deleted, unknown, ignored = [], [], []
657 deleted, unknown, ignored = [], [], []
651 mf2 = mfmatches(node2)
658 mf2 = mfmatches(node2)
652
659
653 if node1:
660 if node1:
654 # flush lists from dirstate before comparing manifests
661 # flush lists from dirstate before comparing manifests
655 modified, added = [], []
662 modified, added = [], []
656
663
657 for fn in mf2:
664 for fn in mf2:
658 if mf1.has_key(fn):
665 if mf1.has_key(fn):
659 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
666 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
660 modified.append(fn)
667 modified.append(fn)
661 del mf1[fn]
668 del mf1[fn]
662 else:
669 else:
663 added.append(fn)
670 added.append(fn)
664
671
665 removed = mf1.keys()
672 removed = mf1.keys()
666
673
667 # sort and return results:
674 # sort and return results:
668 for l in modified, added, removed, deleted, unknown, ignored:
675 for l in modified, added, removed, deleted, unknown, ignored:
669 l.sort()
676 l.sort()
670 if show_ignored is None:
677 if show_ignored is None:
671 return (modified, added, removed, deleted, unknown)
678 return (modified, added, removed, deleted, unknown)
672 else:
679 else:
673 return (modified, added, removed, deleted, unknown, ignored)
680 return (modified, added, removed, deleted, unknown, ignored)
674
681
675 def add(self, list, wlock=None):
682 def add(self, list, wlock=None):
676 if not wlock:
683 if not wlock:
677 wlock = self.wlock()
684 wlock = self.wlock()
678 for f in list:
685 for f in list:
679 p = self.wjoin(f)
686 p = self.wjoin(f)
680 if not os.path.exists(p):
687 if not os.path.exists(p):
681 self.ui.warn(_("%s does not exist!\n") % f)
688 self.ui.warn(_("%s does not exist!\n") % f)
682 elif not os.path.isfile(p):
689 elif not os.path.isfile(p):
683 self.ui.warn(_("%s not added: only files supported currently\n")
690 self.ui.warn(_("%s not added: only files supported currently\n")
684 % f)
691 % f)
685 elif self.dirstate.state(f) in 'an':
692 elif self.dirstate.state(f) in 'an':
686 self.ui.warn(_("%s already tracked!\n") % f)
693 self.ui.warn(_("%s already tracked!\n") % f)
687 else:
694 else:
688 self.dirstate.update([f], "a")
695 self.dirstate.update([f], "a")
689
696
690 def forget(self, list, wlock=None):
697 def forget(self, list, wlock=None):
691 if not wlock:
698 if not wlock:
692 wlock = self.wlock()
699 wlock = self.wlock()
693 for f in list:
700 for f in list:
694 if self.dirstate.state(f) not in 'ai':
701 if self.dirstate.state(f) not in 'ai':
695 self.ui.warn(_("%s not added!\n") % f)
702 self.ui.warn(_("%s not added!\n") % f)
696 else:
703 else:
697 self.dirstate.forget([f])
704 self.dirstate.forget([f])
698
705
699 def remove(self, list, unlink=False, wlock=None):
706 def remove(self, list, unlink=False, wlock=None):
700 if unlink:
707 if unlink:
701 for f in list:
708 for f in list:
702 try:
709 try:
703 util.unlink(self.wjoin(f))
710 util.unlink(self.wjoin(f))
704 except OSError, inst:
711 except OSError, inst:
705 if inst.errno != errno.ENOENT:
712 if inst.errno != errno.ENOENT:
706 raise
713 raise
707 if not wlock:
714 if not wlock:
708 wlock = self.wlock()
715 wlock = self.wlock()
709 for f in list:
716 for f in list:
710 p = self.wjoin(f)
717 p = self.wjoin(f)
711 if os.path.exists(p):
718 if os.path.exists(p):
712 self.ui.warn(_("%s still exists!\n") % f)
719 self.ui.warn(_("%s still exists!\n") % f)
713 elif self.dirstate.state(f) == 'a':
720 elif self.dirstate.state(f) == 'a':
714 self.dirstate.forget([f])
721 self.dirstate.forget([f])
715 elif f not in self.dirstate:
722 elif f not in self.dirstate:
716 self.ui.warn(_("%s not tracked!\n") % f)
723 self.ui.warn(_("%s not tracked!\n") % f)
717 else:
724 else:
718 self.dirstate.update([f], "r")
725 self.dirstate.update([f], "r")
719
726
720 def undelete(self, list, wlock=None):
727 def undelete(self, list, wlock=None):
721 p = self.dirstate.parents()[0]
728 p = self.dirstate.parents()[0]
722 mn = self.changelog.read(p)[0]
729 mn = self.changelog.read(p)[0]
723 mf = self.manifest.readflags(mn)
730 mf = self.manifest.readflags(mn)
724 m = self.manifest.read(mn)
731 m = self.manifest.read(mn)
725 if not wlock:
732 if not wlock:
726 wlock = self.wlock()
733 wlock = self.wlock()
727 for f in list:
734 for f in list:
728 if self.dirstate.state(f) not in "r":
735 if self.dirstate.state(f) not in "r":
729 self.ui.warn("%s not removed!\n" % f)
736 self.ui.warn("%s not removed!\n" % f)
730 else:
737 else:
731 t = self.file(f).read(m[f])
738 t = self.file(f).read(m[f])
732 self.wwrite(f, t)
739 self.wwrite(f, t)
733 util.set_exec(self.wjoin(f), mf[f])
740 util.set_exec(self.wjoin(f), mf[f])
734 self.dirstate.update([f], "n")
741 self.dirstate.update([f], "n")
735
742
736 def copy(self, source, dest, wlock=None):
743 def copy(self, source, dest, wlock=None):
737 p = self.wjoin(dest)
744 p = self.wjoin(dest)
738 if not os.path.exists(p):
745 if not os.path.exists(p):
739 self.ui.warn(_("%s does not exist!\n") % dest)
746 self.ui.warn(_("%s does not exist!\n") % dest)
740 elif not os.path.isfile(p):
747 elif not os.path.isfile(p):
741 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
748 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
742 else:
749 else:
743 if not wlock:
750 if not wlock:
744 wlock = self.wlock()
751 wlock = self.wlock()
745 if self.dirstate.state(dest) == '?':
752 if self.dirstate.state(dest) == '?':
746 self.dirstate.update([dest], "a")
753 self.dirstate.update([dest], "a")
747 self.dirstate.copy(source, dest)
754 self.dirstate.copy(source, dest)
748
755
749 def heads(self, start=None):
756 def heads(self, start=None):
750 heads = self.changelog.heads(start)
757 heads = self.changelog.heads(start)
751 # sort the output in rev descending order
758 # sort the output in rev descending order
752 heads = [(-self.changelog.rev(h), h) for h in heads]
759 heads = [(-self.changelog.rev(h), h) for h in heads]
753 heads.sort()
760 heads.sort()
754 return [n for (r, n) in heads]
761 return [n for (r, n) in heads]
755
762
756 # branchlookup returns a dict giving a list of branches for
763 # branchlookup returns a dict giving a list of branches for
757 # each head. A branch is defined as the tag of a node or
764 # each head. A branch is defined as the tag of a node or
758 # the branch of the node's parents. If a node has multiple
765 # the branch of the node's parents. If a node has multiple
759 # branch tags, tags are eliminated if they are visible from other
766 # branch tags, tags are eliminated if they are visible from other
760 # branch tags.
767 # branch tags.
761 #
768 #
762 # So, for this graph: a->b->c->d->e
769 # So, for this graph: a->b->c->d->e
763 # \ /
770 # \ /
764 # aa -----/
771 # aa -----/
765 # a has tag 2.6.12
772 # a has tag 2.6.12
766 # d has tag 2.6.13
773 # d has tag 2.6.13
767 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
768 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
769 # from the list.
776 # from the list.
770 #
777 #
771 # It is possible that more than one head will have the same branch tag.
778 # It is possible that more than one head will have the same branch tag.
772 # callers need to check the result for multiple heads under the same
779 # callers need to check the result for multiple heads under the same
773 # branch tag if that is a problem for them (ie checkout of a specific
780 # branch tag if that is a problem for them (ie checkout of a specific
774 # branch).
781 # branch).
775 #
782 #
776 # passing in a specific branch will limit the depth of the search
783 # passing in a specific branch will limit the depth of the search
777 # through the parents. It won't limit the branches returned in the
784 # through the parents. It won't limit the branches returned in the
778 # result though.
785 # result though.
779 def branchlookup(self, heads=None, branch=None):
786 def branchlookup(self, heads=None, branch=None):
780 if not heads:
787 if not heads:
781 heads = self.heads()
788 heads = self.heads()
782 headt = [ h for h in heads ]
789 headt = [ h for h in heads ]
783 chlog = self.changelog
790 chlog = self.changelog
784 branches = {}
791 branches = {}
785 merges = []
792 merges = []
786 seenmerge = {}
793 seenmerge = {}
787
794
788 # traverse the tree once for each head, recording in the branches
795 # traverse the tree once for each head, recording in the branches
789 # dict which tags are visible from this head. The branches
796 # dict which tags are visible from this head. The branches
790 # dict also records which tags are visible from each tag
797 # dict also records which tags are visible from each tag
791 # while we traverse.
798 # while we traverse.
792 while headt or merges:
799 while headt or merges:
793 if merges:
800 if merges:
794 n, found = merges.pop()
801 n, found = merges.pop()
795 visit = [n]
802 visit = [n]
796 else:
803 else:
797 h = headt.pop()
804 h = headt.pop()
798 visit = [h]
805 visit = [h]
799 found = [h]
806 found = [h]
800 seen = {}
807 seen = {}
801 while visit:
808 while visit:
802 n = visit.pop()
809 n = visit.pop()
803 if n in seen:
810 if n in seen:
804 continue
811 continue
805 pp = chlog.parents(n)
812 pp = chlog.parents(n)
806 tags = self.nodetags(n)
813 tags = self.nodetags(n)
807 if tags:
814 if tags:
808 for x in tags:
815 for x in tags:
809 if x == 'tip':
816 if x == 'tip':
810 continue
817 continue
811 for f in found:
818 for f in found:
812 branches.setdefault(f, {})[n] = 1
819 branches.setdefault(f, {})[n] = 1
813 branches.setdefault(n, {})[n] = 1
820 branches.setdefault(n, {})[n] = 1
814 break
821 break
815 if n not in found:
822 if n not in found:
816 found.append(n)
823 found.append(n)
817 if branch in tags:
824 if branch in tags:
818 continue
825 continue
819 seen[n] = 1
826 seen[n] = 1
820 if pp[1] != nullid and n not in seenmerge:
827 if pp[1] != nullid and n not in seenmerge:
821 merges.append((pp[1], [x for x in found]))
828 merges.append((pp[1], [x for x in found]))
822 seenmerge[n] = 1
829 seenmerge[n] = 1
823 if pp[0] != nullid:
830 if pp[0] != nullid:
824 visit.append(pp[0])
831 visit.append(pp[0])
825 # traverse the branches dict, eliminating branch tags from each
832 # traverse the branches dict, eliminating branch tags from each
826 # head that are visible from another branch tag for that head.
833 # head that are visible from another branch tag for that head.
827 out = {}
834 out = {}
828 viscache = {}
835 viscache = {}
829 for h in heads:
836 for h in heads:
830 def visible(node):
837 def visible(node):
831 if node in viscache:
838 if node in viscache:
832 return viscache[node]
839 return viscache[node]
833 ret = {}
840 ret = {}
834 visit = [node]
841 visit = [node]
835 while visit:
842 while visit:
836 x = visit.pop()
843 x = visit.pop()
837 if x in viscache:
844 if x in viscache:
838 ret.update(viscache[x])
845 ret.update(viscache[x])
839 elif x not in ret:
846 elif x not in ret:
840 ret[x] = 1
847 ret[x] = 1
841 if x in branches:
848 if x in branches:
842 visit[len(visit):] = branches[x].keys()
849 visit[len(visit):] = branches[x].keys()
843 viscache[node] = ret
850 viscache[node] = ret
844 return ret
851 return ret
845 if h not in branches:
852 if h not in branches:
846 continue
853 continue
847 # O(n^2), but somewhat limited. This only searches the
854 # O(n^2), but somewhat limited. This only searches the
848 # tags visible from a specific head, not all the tags in the
855 # tags visible from a specific head, not all the tags in the
849 # whole repo.
856 # whole repo.
850 for b in branches[h]:
857 for b in branches[h]:
851 vis = False
858 vis = False
852 for bb in branches[h].keys():
859 for bb in branches[h].keys():
853 if b != bb:
860 if b != bb:
854 if b in visible(bb):
861 if b in visible(bb):
855 vis = True
862 vis = True
856 break
863 break
857 if not vis:
864 if not vis:
858 l = out.setdefault(h, [])
865 l = out.setdefault(h, [])
859 l[len(l):] = self.nodetags(b)
866 l[len(l):] = self.nodetags(b)
860 return out
867 return out
861
868
862 def branches(self, nodes):
869 def branches(self, nodes):
863 if not nodes:
870 if not nodes:
864 nodes = [self.changelog.tip()]
871 nodes = [self.changelog.tip()]
865 b = []
872 b = []
866 for n in nodes:
873 for n in nodes:
867 t = n
874 t = n
868 while n:
875 while n:
869 p = self.changelog.parents(n)
876 p = self.changelog.parents(n)
870 if p[1] != nullid or p[0] == nullid:
877 if p[1] != nullid or p[0] == nullid:
871 b.append((t, n, p[0], p[1]))
878 b.append((t, n, p[0], p[1]))
872 break
879 break
873 n = p[0]
880 n = p[0]
874 return b
881 return b
875
882
876 def between(self, pairs):
883 def between(self, pairs):
877 r = []
884 r = []
878
885
879 for top, bottom in pairs:
886 for top, bottom in pairs:
880 n, l, i = top, [], 0
887 n, l, i = top, [], 0
881 f = 1
888 f = 1
882
889
883 while n != bottom:
890 while n != bottom:
884 p = self.changelog.parents(n)[0]
891 p = self.changelog.parents(n)[0]
885 if i == f:
892 if i == f:
886 l.append(n)
893 l.append(n)
887 f = f * 2
894 f = f * 2
888 n = p
895 n = p
889 i += 1
896 i += 1
890
897
891 r.append(l)
898 r.append(l)
892
899
893 return r
900 return r
894
901
895 def findincoming(self, remote, base=None, heads=None, force=False):
902 def findincoming(self, remote, base=None, heads=None, force=False):
896 m = self.changelog.nodemap
903 m = self.changelog.nodemap
897 search = []
904 search = []
898 fetch = {}
905 fetch = {}
899 seen = {}
906 seen = {}
900 seenbranch = {}
907 seenbranch = {}
901 if base == None:
908 if base == None:
902 base = {}
909 base = {}
903
910
904 if not heads:
911 if not heads:
905 heads = remote.heads()
912 heads = remote.heads()
906
913
907 if self.changelog.tip() == nullid:
914 if self.changelog.tip() == nullid:
908 if heads != [nullid]:
915 if heads != [nullid]:
909 return [nullid]
916 return [nullid]
910 return []
917 return []
911
918
912 # assume we're closer to the tip than the root
919 # assume we're closer to the tip than the root
913 # and start by examining the heads
920 # and start by examining the heads
914 self.ui.status(_("searching for changes\n"))
921 self.ui.status(_("searching for changes\n"))
915
922
916 unknown = []
923 unknown = []
917 for h in heads:
924 for h in heads:
918 if h not in m:
925 if h not in m:
919 unknown.append(h)
926 unknown.append(h)
920 else:
927 else:
921 base[h] = 1
928 base[h] = 1
922
929
923 if not unknown:
930 if not unknown:
924 return []
931 return []
925
932
926 rep = {}
933 rep = {}
927 reqcnt = 0
934 reqcnt = 0
928
935
929 # search through remote branches
936 # search through remote branches
930 # a 'branch' here is a linear segment of history, with four parts:
937 # a 'branch' here is a linear segment of history, with four parts:
931 # head, root, first parent, second parent
938 # head, root, first parent, second parent
932 # (a branch always has two parents (or none) by definition)
939 # (a branch always has two parents (or none) by definition)
933 unknown = remote.branches(unknown)
940 unknown = remote.branches(unknown)
934 while unknown:
941 while unknown:
935 r = []
942 r = []
936 while unknown:
943 while unknown:
937 n = unknown.pop(0)
944 n = unknown.pop(0)
938 if n[0] in seen:
945 if n[0] in seen:
939 continue
946 continue
940
947
941 self.ui.debug(_("examining %s:%s\n")
948 self.ui.debug(_("examining %s:%s\n")
942 % (short(n[0]), short(n[1])))
949 % (short(n[0]), short(n[1])))
943 if n[0] == nullid:
950 if n[0] == nullid:
944 break
951 break
945 if n in seenbranch:
952 if n in seenbranch:
946 self.ui.debug(_("branch already found\n"))
953 self.ui.debug(_("branch already found\n"))
947 continue
954 continue
948 if n[1] and n[1] in m: # do we know the base?
955 if n[1] and n[1] in m: # do we know the base?
949 self.ui.debug(_("found incomplete branch %s:%s\n")
956 self.ui.debug(_("found incomplete branch %s:%s\n")
950 % (short(n[0]), short(n[1])))
957 % (short(n[0]), short(n[1])))
951 search.append(n) # schedule branch range for scanning
958 search.append(n) # schedule branch range for scanning
952 seenbranch[n] = 1
959 seenbranch[n] = 1
953 else:
960 else:
954 if n[1] not in seen and n[1] not in fetch:
961 if n[1] not in seen and n[1] not in fetch:
955 if n[2] in m and n[3] in m:
962 if n[2] in m and n[3] in m:
956 self.ui.debug(_("found new changeset %s\n") %
963 self.ui.debug(_("found new changeset %s\n") %
957 short(n[1]))
964 short(n[1]))
958 fetch[n[1]] = 1 # earliest unknown
965 fetch[n[1]] = 1 # earliest unknown
959 base[n[2]] = 1 # latest known
966 base[n[2]] = 1 # latest known
960 continue
967 continue
961
968
962 for a in n[2:4]:
969 for a in n[2:4]:
963 if a not in rep:
970 if a not in rep:
964 r.append(a)
971 r.append(a)
965 rep[a] = 1
972 rep[a] = 1
966
973
967 seen[n[0]] = 1
974 seen[n[0]] = 1
968
975
969 if r:
976 if r:
970 reqcnt += 1
977 reqcnt += 1
971 self.ui.debug(_("request %d: %s\n") %
978 self.ui.debug(_("request %d: %s\n") %
972 (reqcnt, " ".join(map(short, r))))
979 (reqcnt, " ".join(map(short, r))))
973 for p in range(0, len(r), 10):
980 for p in range(0, len(r), 10):
974 for b in remote.branches(r[p:p+10]):
981 for b in remote.branches(r[p:p+10]):
975 self.ui.debug(_("received %s:%s\n") %
982 self.ui.debug(_("received %s:%s\n") %
976 (short(b[0]), short(b[1])))
983 (short(b[0]), short(b[1])))
977 if b[0] in m:
984 if b[0] in m:
978 self.ui.debug(_("found base node %s\n")
985 self.ui.debug(_("found base node %s\n")
979 % short(b[0]))
986 % short(b[0]))
980 base[b[0]] = 1
987 base[b[0]] = 1
981 elif b[0] not in seen:
988 elif b[0] not in seen:
982 unknown.append(b)
989 unknown.append(b)
983
990
984 # do binary search on the branches we found
991 # do binary search on the branches we found
985 while search:
992 while search:
986 n = search.pop(0)
993 n = search.pop(0)
987 reqcnt += 1
994 reqcnt += 1
988 l = remote.between([(n[0], n[1])])[0]
995 l = remote.between([(n[0], n[1])])[0]
989 l.append(n[1])
996 l.append(n[1])
990 p = n[0]
997 p = n[0]
991 f = 1
998 f = 1
992 for i in l:
999 for i in l:
993 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1000 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
994 if i in m:
1001 if i in m:
995 if f <= 2:
1002 if f <= 2:
996 self.ui.debug(_("found new branch changeset %s\n") %
1003 self.ui.debug(_("found new branch changeset %s\n") %
997 short(p))
1004 short(p))
998 fetch[p] = 1
1005 fetch[p] = 1
999 base[i] = 1
1006 base[i] = 1
1000 else:
1007 else:
1001 self.ui.debug(_("narrowed branch search to %s:%s\n")
1008 self.ui.debug(_("narrowed branch search to %s:%s\n")
1002 % (short(p), short(i)))
1009 % (short(p), short(i)))
1003 search.append((p, i))
1010 search.append((p, i))
1004 break
1011 break
1005 p, f = i, f * 2
1012 p, f = i, f * 2
1006
1013
1007 # sanity check our fetch list
1014 # sanity check our fetch list
1008 for f in fetch.keys():
1015 for f in fetch.keys():
1009 if f in m:
1016 if f in m:
1010 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1017 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1011
1018
1012 if base.keys() == [nullid]:
1019 if base.keys() == [nullid]:
1013 if force:
1020 if force:
1014 self.ui.warn(_("warning: repository is unrelated\n"))
1021 self.ui.warn(_("warning: repository is unrelated\n"))
1015 else:
1022 else:
1016 raise util.Abort(_("repository is unrelated"))
1023 raise util.Abort(_("repository is unrelated"))
1017
1024
1018 self.ui.note(_("found new changesets starting at ") +
1025 self.ui.note(_("found new changesets starting at ") +
1019 " ".join([short(f) for f in fetch]) + "\n")
1026 " ".join([short(f) for f in fetch]) + "\n")
1020
1027
1021 self.ui.debug(_("%d total queries\n") % reqcnt)
1028 self.ui.debug(_("%d total queries\n") % reqcnt)
1022
1029
1023 return fetch.keys()
1030 return fetch.keys()
1024
1031
1025 def findoutgoing(self, remote, base=None, heads=None, force=False):
1032 def findoutgoing(self, remote, base=None, heads=None, force=False):
1026 """Return list of nodes that are roots of subsets not in remote
1033 """Return list of nodes that are roots of subsets not in remote
1027
1034
1028 If base dict is specified, assume that these nodes and their parents
1035 If base dict is specified, assume that these nodes and their parents
1029 exist on the remote side.
1036 exist on the remote side.
1030 If a list of heads is specified, return only nodes which are heads
1037 If a list of heads is specified, return only nodes which are heads
1031 or ancestors of these heads, and return a second element which
1038 or ancestors of these heads, and return a second element which
1032 contains all remote heads which get new children.
1039 contains all remote heads which get new children.
1033 """
1040 """
1034 if base == None:
1041 if base == None:
1035 base = {}
1042 base = {}
1036 self.findincoming(remote, base, heads, force=force)
1043 self.findincoming(remote, base, heads, force=force)
1037
1044
1038 self.ui.debug(_("common changesets up to ")
1045 self.ui.debug(_("common changesets up to ")
1039 + " ".join(map(short, base.keys())) + "\n")
1046 + " ".join(map(short, base.keys())) + "\n")
1040
1047
1041 remain = dict.fromkeys(self.changelog.nodemap)
1048 remain = dict.fromkeys(self.changelog.nodemap)
1042
1049
1043 # prune everything remote has from the tree
1050 # prune everything remote has from the tree
1044 del remain[nullid]
1051 del remain[nullid]
1045 remove = base.keys()
1052 remove = base.keys()
1046 while remove:
1053 while remove:
1047 n = remove.pop(0)
1054 n = remove.pop(0)
1048 if n in remain:
1055 if n in remain:
1049 del remain[n]
1056 del remain[n]
1050 for p in self.changelog.parents(n):
1057 for p in self.changelog.parents(n):
1051 remove.append(p)
1058 remove.append(p)
1052
1059
1053 # find every node whose parents have been pruned
1060 # find every node whose parents have been pruned
1054 subset = []
1061 subset = []
1055 # find every remote head that will get new children
1062 # find every remote head that will get new children
1056 updated_heads = {}
1063 updated_heads = {}
1057 for n in remain:
1064 for n in remain:
1058 p1, p2 = self.changelog.parents(n)
1065 p1, p2 = self.changelog.parents(n)
1059 if p1 not in remain and p2 not in remain:
1066 if p1 not in remain and p2 not in remain:
1060 subset.append(n)
1067 subset.append(n)
1061 if heads:
1068 if heads:
1062 if p1 in heads:
1069 if p1 in heads:
1063 updated_heads[p1] = True
1070 updated_heads[p1] = True
1064 if p2 in heads:
1071 if p2 in heads:
1065 updated_heads[p2] = True
1072 updated_heads[p2] = True
1066
1073
1067 # this is the set of all roots we have to push
1074 # this is the set of all roots we have to push
1068 if heads:
1075 if heads:
1069 return subset, updated_heads.keys()
1076 return subset, updated_heads.keys()
1070 else:
1077 else:
1071 return subset
1078 return subset
1072
1079
1073 def pull(self, remote, heads=None, force=False):
1080 def pull(self, remote, heads=None, force=False):
1074 l = self.lock()
1081 l = self.lock()
1075
1082
1076 fetch = self.findincoming(remote, force=force)
1083 fetch = self.findincoming(remote, force=force)
1077 if fetch == [nullid]:
1084 if fetch == [nullid]:
1078 self.ui.status(_("requesting all changes\n"))
1085 self.ui.status(_("requesting all changes\n"))
1079
1086
1080 if not fetch:
1087 if not fetch:
1081 self.ui.status(_("no changes found\n"))
1088 self.ui.status(_("no changes found\n"))
1082 return 0
1089 return 0
1083
1090
1084 if heads is None:
1091 if heads is None:
1085 cg = remote.changegroup(fetch, 'pull')
1092 cg = remote.changegroup(fetch, 'pull')
1086 else:
1093 else:
1087 cg = remote.changegroupsubset(fetch, heads, 'pull')
1094 cg = remote.changegroupsubset(fetch, heads, 'pull')
1088 return self.addchangegroup(cg, 'pull')
1095 return self.addchangegroup(cg, 'pull')
1089
1096
1090 def push(self, remote, force=False, revs=None):
1097 def push(self, remote, force=False, revs=None):
1091 lock = remote.lock()
1098 lock = remote.lock()
1092
1099
1093 base = {}
1100 base = {}
1094 remote_heads = remote.heads()
1101 remote_heads = remote.heads()
1095 inc = self.findincoming(remote, base, remote_heads, force=force)
1102 inc = self.findincoming(remote, base, remote_heads, force=force)
1096 if not force and inc:
1103 if not force and inc:
1097 self.ui.warn(_("abort: unsynced remote changes!\n"))
1104 self.ui.warn(_("abort: unsynced remote changes!\n"))
1098 self.ui.status(_("(did you forget to sync?"
1105 self.ui.status(_("(did you forget to sync?"
1099 " use push -f to force)\n"))
1106 " use push -f to force)\n"))
1100 return 1
1107 return 1
1101
1108
1102 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1109 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1103 if revs is not None:
1110 if revs is not None:
1104 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1111 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1105 else:
1112 else:
1106 bases, heads = update, self.changelog.heads()
1113 bases, heads = update, self.changelog.heads()
1107
1114
1108 if not bases:
1115 if not bases:
1109 self.ui.status(_("no changes found\n"))
1116 self.ui.status(_("no changes found\n"))
1110 return 1
1117 return 1
1111 elif not force:
1118 elif not force:
1112 # FIXME we don't properly detect creation of new heads
1119 # FIXME we don't properly detect creation of new heads
1113 # in the push -r case, assume the user knows what he's doing
1120 # in the push -r case, assume the user knows what he's doing
1114 if not revs and len(remote_heads) < len(heads) \
1121 if not revs and len(remote_heads) < len(heads) \
1115 and remote_heads != [nullid]:
1122 and remote_heads != [nullid]:
1116 self.ui.warn(_("abort: push creates new remote branches!\n"))
1123 self.ui.warn(_("abort: push creates new remote branches!\n"))
1117 self.ui.status(_("(did you forget to merge?"
1124 self.ui.status(_("(did you forget to merge?"
1118 " use push -f to force)\n"))
1125 " use push -f to force)\n"))
1119 return 1
1126 return 1
1120
1127
1121 if revs is None:
1128 if revs is None:
1122 cg = self.changegroup(update, 'push')
1129 cg = self.changegroup(update, 'push')
1123 else:
1130 else:
1124 cg = self.changegroupsubset(update, revs, 'push')
1131 cg = self.changegroupsubset(update, revs, 'push')
1125 return remote.addchangegroup(cg, 'push')
1132 return remote.addchangegroup(cg, 'push')
1126
1133
1127 def changegroupsubset(self, bases, heads, source):
1134 def changegroupsubset(self, bases, heads, source):
1128 """This function generates a changegroup consisting of all the nodes
1135 """This function generates a changegroup consisting of all the nodes
1129 that are descendents of any of the bases, and ancestors of any of
1136 that are descendents of any of the bases, and ancestors of any of
1130 the heads.
1137 the heads.
1131
1138
1132 It is fairly complex as determining which filenodes and which
1139 It is fairly complex as determining which filenodes and which
1133 manifest nodes need to be included for the changeset to be complete
1140 manifest nodes need to be included for the changeset to be complete
1134 is non-trivial.
1141 is non-trivial.
1135
1142
1136 Another wrinkle is doing the reverse, figuring out which changeset in
1143 Another wrinkle is doing the reverse, figuring out which changeset in
1137 the changegroup a particular filenode or manifestnode belongs to."""
1144 the changegroup a particular filenode or manifestnode belongs to."""
1138
1145
1139 self.hook('preoutgoing', throw=True, source=source)
1146 self.hook('preoutgoing', throw=True, source=source)
1140
1147
1141 # Set up some initial variables
1148 # Set up some initial variables
1142 # Make it easy to refer to self.changelog
1149 # Make it easy to refer to self.changelog
1143 cl = self.changelog
1150 cl = self.changelog
1144 # msng is short for missing - compute the list of changesets in this
1151 # msng is short for missing - compute the list of changesets in this
1145 # changegroup.
1152 # changegroup.
1146 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1153 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1147 # Some bases may turn out to be superfluous, and some heads may be
1154 # Some bases may turn out to be superfluous, and some heads may be
1148 # too. nodesbetween will return the minimal set of bases and heads
1155 # too. nodesbetween will return the minimal set of bases and heads
1149 # necessary to re-create the changegroup.
1156 # necessary to re-create the changegroup.
1150
1157
1151 # Known heads are the list of heads that it is assumed the recipient
1158 # Known heads are the list of heads that it is assumed the recipient
1152 # of this changegroup will know about.
1159 # of this changegroup will know about.
1153 knownheads = {}
1160 knownheads = {}
1154 # We assume that all parents of bases are known heads.
1161 # We assume that all parents of bases are known heads.
1155 for n in bases:
1162 for n in bases:
1156 for p in cl.parents(n):
1163 for p in cl.parents(n):
1157 if p != nullid:
1164 if p != nullid:
1158 knownheads[p] = 1
1165 knownheads[p] = 1
1159 knownheads = knownheads.keys()
1166 knownheads = knownheads.keys()
1160 if knownheads:
1167 if knownheads:
1161 # Now that we know what heads are known, we can compute which
1168 # Now that we know what heads are known, we can compute which
1162 # changesets are known. The recipient must know about all
1169 # changesets are known. The recipient must know about all
1163 # changesets required to reach the known heads from the null
1170 # changesets required to reach the known heads from the null
1164 # changeset.
1171 # changeset.
1165 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1172 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1166 junk = None
1173 junk = None
1167 # Transform the list into an ersatz set.
1174 # Transform the list into an ersatz set.
1168 has_cl_set = dict.fromkeys(has_cl_set)
1175 has_cl_set = dict.fromkeys(has_cl_set)
1169 else:
1176 else:
1170 # If there were no known heads, the recipient cannot be assumed to
1177 # If there were no known heads, the recipient cannot be assumed to
1171 # know about any changesets.
1178 # know about any changesets.
1172 has_cl_set = {}
1179 has_cl_set = {}
1173
1180
1174 # Make it easy to refer to self.manifest
1181 # Make it easy to refer to self.manifest
1175 mnfst = self.manifest
1182 mnfst = self.manifest
1176 # We don't know which manifests are missing yet
1183 # We don't know which manifests are missing yet
1177 msng_mnfst_set = {}
1184 msng_mnfst_set = {}
1178 # Nor do we know which filenodes are missing.
1185 # Nor do we know which filenodes are missing.
1179 msng_filenode_set = {}
1186 msng_filenode_set = {}
1180
1187
1181 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1188 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1182 junk = None
1189 junk = None
1183
1190
1184 # A changeset always belongs to itself, so the changenode lookup
1191 # A changeset always belongs to itself, so the changenode lookup
1185 # function for a changenode is identity.
1192 # function for a changenode is identity.
1186 def identity(x):
1193 def identity(x):
1187 return x
1194 return x
1188
1195
1189 # A function generating function. Sets up an environment for the
1196 # A function generating function. Sets up an environment for the
1190 # inner function.
1197 # inner function.
1191 def cmp_by_rev_func(revlog):
1198 def cmp_by_rev_func(revlog):
1192 # Compare two nodes by their revision number in the environment's
1199 # Compare two nodes by their revision number in the environment's
1193 # revision history. Since the revision number both represents the
1200 # revision history. Since the revision number both represents the
1194 # most efficient order to read the nodes in, and represents a
1201 # most efficient order to read the nodes in, and represents a
1195 # topological sorting of the nodes, this function is often useful.
1202 # topological sorting of the nodes, this function is often useful.
1196 def cmp_by_rev(a, b):
1203 def cmp_by_rev(a, b):
1197 return cmp(revlog.rev(a), revlog.rev(b))
1204 return cmp(revlog.rev(a), revlog.rev(b))
1198 return cmp_by_rev
1205 return cmp_by_rev
1199
1206
1200 # If we determine that a particular file or manifest node must be a
1207 # If we determine that a particular file or manifest node must be a
1201 # node that the recipient of the changegroup will already have, we can
1208 # node that the recipient of the changegroup will already have, we can
1202 # also assume the recipient will have all the parents. This function
1209 # also assume the recipient will have all the parents. This function
1203 # prunes them from the set of missing nodes.
1210 # prunes them from the set of missing nodes.
1204 def prune_parents(revlog, hasset, msngset):
1211 def prune_parents(revlog, hasset, msngset):
1205 haslst = hasset.keys()
1212 haslst = hasset.keys()
1206 haslst.sort(cmp_by_rev_func(revlog))
1213 haslst.sort(cmp_by_rev_func(revlog))
1207 for node in haslst:
1214 for node in haslst:
1208 parentlst = [p for p in revlog.parents(node) if p != nullid]
1215 parentlst = [p for p in revlog.parents(node) if p != nullid]
1209 while parentlst:
1216 while parentlst:
1210 n = parentlst.pop()
1217 n = parentlst.pop()
1211 if n not in hasset:
1218 if n not in hasset:
1212 hasset[n] = 1
1219 hasset[n] = 1
1213 p = [p for p in revlog.parents(n) if p != nullid]
1220 p = [p for p in revlog.parents(n) if p != nullid]
1214 parentlst.extend(p)
1221 parentlst.extend(p)
1215 for n in hasset:
1222 for n in hasset:
1216 msngset.pop(n, None)
1223 msngset.pop(n, None)
1217
1224
1218 # This is a function generating function used to set up an environment
1225 # This is a function generating function used to set up an environment
1219 # for the inner function to execute in.
1226 # for the inner function to execute in.
1220 def manifest_and_file_collector(changedfileset):
1227 def manifest_and_file_collector(changedfileset):
1221 # This is an information gathering function that gathers
1228 # This is an information gathering function that gathers
1222 # information from each changeset node that goes out as part of
1229 # information from each changeset node that goes out as part of
1223 # the changegroup. The information gathered is a list of which
1230 # the changegroup. The information gathered is a list of which
1224 # manifest nodes are potentially required (the recipient may
1231 # manifest nodes are potentially required (the recipient may
1225 # already have them) and total list of all files which were
1232 # already have them) and total list of all files which were
1226 # changed in any changeset in the changegroup.
1233 # changed in any changeset in the changegroup.
1227 #
1234 #
1228 # We also remember the first changenode we saw any manifest
1235 # We also remember the first changenode we saw any manifest
1229 # referenced by so we can later determine which changenode 'owns'
1236 # referenced by so we can later determine which changenode 'owns'
1230 # the manifest.
1237 # the manifest.
1231 def collect_manifests_and_files(clnode):
1238 def collect_manifests_and_files(clnode):
1232 c = cl.read(clnode)
1239 c = cl.read(clnode)
1233 for f in c[3]:
1240 for f in c[3]:
1234 # This is to make sure we only have one instance of each
1241 # This is to make sure we only have one instance of each
1235 # filename string for each filename.
1242 # filename string for each filename.
1236 changedfileset.setdefault(f, f)
1243 changedfileset.setdefault(f, f)
1237 msng_mnfst_set.setdefault(c[0], clnode)
1244 msng_mnfst_set.setdefault(c[0], clnode)
1238 return collect_manifests_and_files
1245 return collect_manifests_and_files
1239
1246
1240 # Figure out which manifest nodes (of the ones we think might be part
1247 # Figure out which manifest nodes (of the ones we think might be part
1241 # of the changegroup) the recipient must know about and remove them
1248 # of the changegroup) the recipient must know about and remove them
1242 # from the changegroup.
1249 # from the changegroup.
1243 def prune_manifests():
1250 def prune_manifests():
1244 has_mnfst_set = {}
1251 has_mnfst_set = {}
1245 for n in msng_mnfst_set:
1252 for n in msng_mnfst_set:
1246 # If a 'missing' manifest thinks it belongs to a changenode
1253 # If a 'missing' manifest thinks it belongs to a changenode
1247 # the recipient is assumed to have, obviously the recipient
1254 # the recipient is assumed to have, obviously the recipient
1248 # must have that manifest.
1255 # must have that manifest.
1249 linknode = cl.node(mnfst.linkrev(n))
1256 linknode = cl.node(mnfst.linkrev(n))
1250 if linknode in has_cl_set:
1257 if linknode in has_cl_set:
1251 has_mnfst_set[n] = 1
1258 has_mnfst_set[n] = 1
1252 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1259 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1253
1260
1254 # Use the information collected in collect_manifests_and_files to say
1261 # Use the information collected in collect_manifests_and_files to say
1255 # which changenode any manifestnode belongs to.
1262 # which changenode any manifestnode belongs to.
1256 def lookup_manifest_link(mnfstnode):
1263 def lookup_manifest_link(mnfstnode):
1257 return msng_mnfst_set[mnfstnode]
1264 return msng_mnfst_set[mnfstnode]
1258
1265
1259 # A function generating function that sets up the initial environment
1266 # A function generating function that sets up the initial environment
1260 # the inner function.
1267 # the inner function.
1261 def filenode_collector(changedfiles):
1268 def filenode_collector(changedfiles):
1262 next_rev = [0]
1269 next_rev = [0]
1263 # This gathers information from each manifestnode included in the
1270 # This gathers information from each manifestnode included in the
1264 # changegroup about which filenodes the manifest node references
1271 # changegroup about which filenodes the manifest node references
1265 # so we can include those in the changegroup too.
1272 # so we can include those in the changegroup too.
1266 #
1273 #
1267 # It also remembers which changenode each filenode belongs to. It
1274 # It also remembers which changenode each filenode belongs to. It
1268 # does this by assuming the a filenode belongs to the changenode
1275 # does this by assuming the a filenode belongs to the changenode
1269 # the first manifest that references it belongs to.
1276 # the first manifest that references it belongs to.
1270 def collect_msng_filenodes(mnfstnode):
1277 def collect_msng_filenodes(mnfstnode):
1271 r = mnfst.rev(mnfstnode)
1278 r = mnfst.rev(mnfstnode)
1272 if r == next_rev[0]:
1279 if r == next_rev[0]:
1273 # If the last rev we looked at was the one just previous,
1280 # If the last rev we looked at was the one just previous,
1274 # we only need to see a diff.
1281 # we only need to see a diff.
1275 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1282 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1276 # For each line in the delta
1283 # For each line in the delta
1277 for dline in delta.splitlines():
1284 for dline in delta.splitlines():
1278 # get the filename and filenode for that line
1285 # get the filename and filenode for that line
1279 f, fnode = dline.split('\0')
1286 f, fnode = dline.split('\0')
1280 fnode = bin(fnode[:40])
1287 fnode = bin(fnode[:40])
1281 f = changedfiles.get(f, None)
1288 f = changedfiles.get(f, None)
1282 # And if the file is in the list of files we care
1289 # And if the file is in the list of files we care
1283 # about.
1290 # about.
1284 if f is not None:
1291 if f is not None:
1285 # Get the changenode this manifest belongs to
1292 # Get the changenode this manifest belongs to
1286 clnode = msng_mnfst_set[mnfstnode]
1293 clnode = msng_mnfst_set[mnfstnode]
1287 # Create the set of filenodes for the file if
1294 # Create the set of filenodes for the file if
1288 # there isn't one already.
1295 # there isn't one already.
1289 ndset = msng_filenode_set.setdefault(f, {})
1296 ndset = msng_filenode_set.setdefault(f, {})
1290 # And set the filenode's changelog node to the
1297 # And set the filenode's changelog node to the
1291 # manifest's if it hasn't been set already.
1298 # manifest's if it hasn't been set already.
1292 ndset.setdefault(fnode, clnode)
1299 ndset.setdefault(fnode, clnode)
1293 else:
1300 else:
1294 # Otherwise we need a full manifest.
1301 # Otherwise we need a full manifest.
1295 m = mnfst.read(mnfstnode)
1302 m = mnfst.read(mnfstnode)
1296 # For every file in we care about.
1303 # For every file in we care about.
1297 for f in changedfiles:
1304 for f in changedfiles:
1298 fnode = m.get(f, None)
1305 fnode = m.get(f, None)
1299 # If it's in the manifest
1306 # If it's in the manifest
1300 if fnode is not None:
1307 if fnode is not None:
1301 # See comments above.
1308 # See comments above.
1302 clnode = msng_mnfst_set[mnfstnode]
1309 clnode = msng_mnfst_set[mnfstnode]
1303 ndset = msng_filenode_set.setdefault(f, {})
1310 ndset = msng_filenode_set.setdefault(f, {})
1304 ndset.setdefault(fnode, clnode)
1311 ndset.setdefault(fnode, clnode)
1305 # Remember the revision we hope to see next.
1312 # Remember the revision we hope to see next.
1306 next_rev[0] = r + 1
1313 next_rev[0] = r + 1
1307 return collect_msng_filenodes
1314 return collect_msng_filenodes
1308
1315
1309 # We have a list of filenodes we think we need for a file, lets remove
1316 # We have a list of filenodes we think we need for a file, lets remove
1310 # all those we now the recipient must have.
1317 # all those we now the recipient must have.
1311 def prune_filenodes(f, filerevlog):
1318 def prune_filenodes(f, filerevlog):
1312 msngset = msng_filenode_set[f]
1319 msngset = msng_filenode_set[f]
1313 hasset = {}
1320 hasset = {}
1314 # If a 'missing' filenode thinks it belongs to a changenode we
1321 # If a 'missing' filenode thinks it belongs to a changenode we
1315 # assume the recipient must have, then the recipient must have
1322 # assume the recipient must have, then the recipient must have
1316 # that filenode.
1323 # that filenode.
1317 for n in msngset:
1324 for n in msngset:
1318 clnode = cl.node(filerevlog.linkrev(n))
1325 clnode = cl.node(filerevlog.linkrev(n))
1319 if clnode in has_cl_set:
1326 if clnode in has_cl_set:
1320 hasset[n] = 1
1327 hasset[n] = 1
1321 prune_parents(filerevlog, hasset, msngset)
1328 prune_parents(filerevlog, hasset, msngset)
1322
1329
1323 # A function generator function that sets up the a context for the
1330 # A function generator function that sets up the a context for the
1324 # inner function.
1331 # inner function.
1325 def lookup_filenode_link_func(fname):
1332 def lookup_filenode_link_func(fname):
1326 msngset = msng_filenode_set[fname]
1333 msngset = msng_filenode_set[fname]
1327 # Lookup the changenode the filenode belongs to.
1334 # Lookup the changenode the filenode belongs to.
1328 def lookup_filenode_link(fnode):
1335 def lookup_filenode_link(fnode):
1329 return msngset[fnode]
1336 return msngset[fnode]
1330 return lookup_filenode_link
1337 return lookup_filenode_link
1331
1338
1332 # Now that we have all theses utility functions to help out and
1339 # Now that we have all theses utility functions to help out and
1333 # logically divide up the task, generate the group.
1340 # logically divide up the task, generate the group.
1334 def gengroup():
1341 def gengroup():
1335 # The set of changed files starts empty.
1342 # The set of changed files starts empty.
1336 changedfiles = {}
1343 changedfiles = {}
1337 # Create a changenode group generator that will call our functions
1344 # Create a changenode group generator that will call our functions
1338 # back to lookup the owning changenode and collect information.
1345 # back to lookup the owning changenode and collect information.
1339 group = cl.group(msng_cl_lst, identity,
1346 group = cl.group(msng_cl_lst, identity,
1340 manifest_and_file_collector(changedfiles))
1347 manifest_and_file_collector(changedfiles))
1341 for chnk in group:
1348 for chnk in group:
1342 yield chnk
1349 yield chnk
1343
1350
1344 # The list of manifests has been collected by the generator
1351 # The list of manifests has been collected by the generator
1345 # calling our functions back.
1352 # calling our functions back.
1346 prune_manifests()
1353 prune_manifests()
1347 msng_mnfst_lst = msng_mnfst_set.keys()
1354 msng_mnfst_lst = msng_mnfst_set.keys()
1348 # Sort the manifestnodes by revision number.
1355 # Sort the manifestnodes by revision number.
1349 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1356 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1350 # Create a generator for the manifestnodes that calls our lookup
1357 # Create a generator for the manifestnodes that calls our lookup
1351 # and data collection functions back.
1358 # and data collection functions back.
1352 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1359 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1353 filenode_collector(changedfiles))
1360 filenode_collector(changedfiles))
1354 for chnk in group:
1361 for chnk in group:
1355 yield chnk
1362 yield chnk
1356
1363
1357 # These are no longer needed, dereference and toss the memory for
1364 # These are no longer needed, dereference and toss the memory for
1358 # them.
1365 # them.
1359 msng_mnfst_lst = None
1366 msng_mnfst_lst = None
1360 msng_mnfst_set.clear()
1367 msng_mnfst_set.clear()
1361
1368
1362 changedfiles = changedfiles.keys()
1369 changedfiles = changedfiles.keys()
1363 changedfiles.sort()
1370 changedfiles.sort()
1364 # Go through all our files in order sorted by name.
1371 # Go through all our files in order sorted by name.
1365 for fname in changedfiles:
1372 for fname in changedfiles:
1366 filerevlog = self.file(fname)
1373 filerevlog = self.file(fname)
1367 # Toss out the filenodes that the recipient isn't really
1374 # Toss out the filenodes that the recipient isn't really
1368 # missing.
1375 # missing.
1369 if msng_filenode_set.has_key(fname):
1376 if msng_filenode_set.has_key(fname):
1370 prune_filenodes(fname, filerevlog)
1377 prune_filenodes(fname, filerevlog)
1371 msng_filenode_lst = msng_filenode_set[fname].keys()
1378 msng_filenode_lst = msng_filenode_set[fname].keys()
1372 else:
1379 else:
1373 msng_filenode_lst = []
1380 msng_filenode_lst = []
1374 # If any filenodes are left, generate the group for them,
1381 # If any filenodes are left, generate the group for them,
1375 # otherwise don't bother.
1382 # otherwise don't bother.
1376 if len(msng_filenode_lst) > 0:
1383 if len(msng_filenode_lst) > 0:
1377 yield changegroup.genchunk(fname)
1384 yield changegroup.genchunk(fname)
1378 # Sort the filenodes by their revision #
1385 # Sort the filenodes by their revision #
1379 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1386 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1380 # Create a group generator and only pass in a changenode
1387 # Create a group generator and only pass in a changenode
1381 # lookup function as we need to collect no information
1388 # lookup function as we need to collect no information
1382 # from filenodes.
1389 # from filenodes.
1383 group = filerevlog.group(msng_filenode_lst,
1390 group = filerevlog.group(msng_filenode_lst,
1384 lookup_filenode_link_func(fname))
1391 lookup_filenode_link_func(fname))
1385 for chnk in group:
1392 for chnk in group:
1386 yield chnk
1393 yield chnk
1387 if msng_filenode_set.has_key(fname):
1394 if msng_filenode_set.has_key(fname):
1388 # Don't need this anymore, toss it to free memory.
1395 # Don't need this anymore, toss it to free memory.
1389 del msng_filenode_set[fname]
1396 del msng_filenode_set[fname]
1390 # Signal that no more groups are left.
1397 # Signal that no more groups are left.
1391 yield changegroup.closechunk()
1398 yield changegroup.closechunk()
1392
1399
1393 if msng_cl_lst:
1400 if msng_cl_lst:
1394 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1401 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1395
1402
1396 return util.chunkbuffer(gengroup())
1403 return util.chunkbuffer(gengroup())
1397
1404
1398 def changegroup(self, basenodes, source):
1405 def changegroup(self, basenodes, source):
1399 """Generate a changegroup of all nodes that we have that a recipient
1406 """Generate a changegroup of all nodes that we have that a recipient
1400 doesn't.
1407 doesn't.
1401
1408
1402 This is much easier than the previous function as we can assume that
1409 This is much easier than the previous function as we can assume that
1403 the recipient has any changenode we aren't sending them."""
1410 the recipient has any changenode we aren't sending them."""
1404
1411
1405 self.hook('preoutgoing', throw=True, source=source)
1412 self.hook('preoutgoing', throw=True, source=source)
1406
1413
1407 cl = self.changelog
1414 cl = self.changelog
1408 nodes = cl.nodesbetween(basenodes, None)[0]
1415 nodes = cl.nodesbetween(basenodes, None)[0]
1409 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1416 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1410
1417
1411 def identity(x):
1418 def identity(x):
1412 return x
1419 return x
1413
1420
1414 def gennodelst(revlog):
1421 def gennodelst(revlog):
1415 for r in xrange(0, revlog.count()):
1422 for r in xrange(0, revlog.count()):
1416 n = revlog.node(r)
1423 n = revlog.node(r)
1417 if revlog.linkrev(n) in revset:
1424 if revlog.linkrev(n) in revset:
1418 yield n
1425 yield n
1419
1426
1420 def changed_file_collector(changedfileset):
1427 def changed_file_collector(changedfileset):
1421 def collect_changed_files(clnode):
1428 def collect_changed_files(clnode):
1422 c = cl.read(clnode)
1429 c = cl.read(clnode)
1423 for fname in c[3]:
1430 for fname in c[3]:
1424 changedfileset[fname] = 1
1431 changedfileset[fname] = 1
1425 return collect_changed_files
1432 return collect_changed_files
1426
1433
1427 def lookuprevlink_func(revlog):
1434 def lookuprevlink_func(revlog):
1428 def lookuprevlink(n):
1435 def lookuprevlink(n):
1429 return cl.node(revlog.linkrev(n))
1436 return cl.node(revlog.linkrev(n))
1430 return lookuprevlink
1437 return lookuprevlink
1431
1438
1432 def gengroup():
1439 def gengroup():
1433 # construct a list of all changed files
1440 # construct a list of all changed files
1434 changedfiles = {}
1441 changedfiles = {}
1435
1442
1436 for chnk in cl.group(nodes, identity,
1443 for chnk in cl.group(nodes, identity,
1437 changed_file_collector(changedfiles)):
1444 changed_file_collector(changedfiles)):
1438 yield chnk
1445 yield chnk
1439 changedfiles = changedfiles.keys()
1446 changedfiles = changedfiles.keys()
1440 changedfiles.sort()
1447 changedfiles.sort()
1441
1448
1442 mnfst = self.manifest
1449 mnfst = self.manifest
1443 nodeiter = gennodelst(mnfst)
1450 nodeiter = gennodelst(mnfst)
1444 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1451 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1445 yield chnk
1452 yield chnk
1446
1453
1447 for fname in changedfiles:
1454 for fname in changedfiles:
1448 filerevlog = self.file(fname)
1455 filerevlog = self.file(fname)
1449 nodeiter = gennodelst(filerevlog)
1456 nodeiter = gennodelst(filerevlog)
1450 nodeiter = list(nodeiter)
1457 nodeiter = list(nodeiter)
1451 if nodeiter:
1458 if nodeiter:
1452 yield changegroup.genchunk(fname)
1459 yield changegroup.genchunk(fname)
1453 lookup = lookuprevlink_func(filerevlog)
1460 lookup = lookuprevlink_func(filerevlog)
1454 for chnk in filerevlog.group(nodeiter, lookup):
1461 for chnk in filerevlog.group(nodeiter, lookup):
1455 yield chnk
1462 yield chnk
1456
1463
1457 yield changegroup.closechunk()
1464 yield changegroup.closechunk()
1458
1465
1459 if nodes:
1466 if nodes:
1460 self.hook('outgoing', node=hex(nodes[0]), source=source)
1467 self.hook('outgoing', node=hex(nodes[0]), source=source)
1461
1468
1462 return util.chunkbuffer(gengroup())
1469 return util.chunkbuffer(gengroup())
1463
1470
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a chunked stream (read via changegroup.getchunk/chunkiter);
        srctype is passed through to the hooks so they can tell e.g. push
        from pull.  Fires 'prechangegroup' (may abort) before reading,
        'pretxnchangegroup' before committing the transaction, and
        'changegroup' plus one 'incoming' per changeset afterwards.
        """

        # Map a changeset node to the linkrev it will be assigned on add.
        # NOTE: relies on `cl` being bound later in the enclosing scope
        # before addgroup invokes this callback.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # Map a changeset node to its (already-added) revision number,
        # used as the linkrev for manifest and file revisions.
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            co = cl.tip()     # old tip, to compute how many changesets came in
            chunkiter = changegroup.chunkiter(source)
            cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
            cnr, cor = map(cl.rev, (cn, co))
            if cn == nullid:
                # empty group: treat new rev as equal to old so the
                # changeset count below comes out zero
                cnr = cor
            changesets = cnr - cor

            mf = None
            try:
                mf = appendfile.appendmanifest(self.opener,
                                               self.manifest.version)

                # pull off the manifest group
                self.ui.status(_("adding manifests\n"))
                mm = mf.tip()
                chunkiter = changegroup.chunkiter(source)
                mo = mf.addgroup(chunkiter, revmap, tr)

                # process the files
                self.ui.status(_("adding file changes\n"))
                while 1:
                    # each file group is preceded by a chunk holding its
                    # name; an empty chunk terminates the stream
                    f = changegroup.getchunk(source)
                    if not f:
                        break
                    self.ui.debug(_("adding %s revisions\n") % f)
                    fl = self.file(f)
                    o = fl.count()
                    chunkiter = changegroup.chunkiter(source)
                    n = fl.addgroup(chunkiter, revmap, tr)
                    revisions += fl.count() - o
                    files += 1

                # write order here is important so concurrent readers will see
                # consistent view of repo
                mf.writedata()
            finally:
                if mf:
                    mf.cleanup()
                # changelog is flushed last: readers that see the new
                # changesets are then guaranteed to see their manifests
                cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added revision; a hook failure here
            # aborts the transaction before tr.close()
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            # one 'incoming' notification per new changeset, in order
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1568
1575
1569 def update(self, node, allow=False, force=False, choose=None,
1576 def update(self, node, allow=False, force=False, choose=None,
1570 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1577 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1571 pl = self.dirstate.parents()
1578 pl = self.dirstate.parents()
1572 if not force and pl[1] != nullid:
1579 if not force and pl[1] != nullid:
1573 raise util.Abort(_("outstanding uncommitted merges"))
1580 raise util.Abort(_("outstanding uncommitted merges"))
1574
1581
1575 err = False
1582 err = False
1576
1583
1577 p1, p2 = pl[0], node
1584 p1, p2 = pl[0], node
1578 pa = self.changelog.ancestor(p1, p2)
1585 pa = self.changelog.ancestor(p1, p2)
1579 m1n = self.changelog.read(p1)[0]
1586 m1n = self.changelog.read(p1)[0]
1580 m2n = self.changelog.read(p2)[0]
1587 m2n = self.changelog.read(p2)[0]
1581 man = self.manifest.ancestor(m1n, m2n)
1588 man = self.manifest.ancestor(m1n, m2n)
1582 m1 = self.manifest.read(m1n)
1589 m1 = self.manifest.read(m1n)
1583 mf1 = self.manifest.readflags(m1n)
1590 mf1 = self.manifest.readflags(m1n)
1584 m2 = self.manifest.read(m2n).copy()
1591 m2 = self.manifest.read(m2n).copy()
1585 mf2 = self.manifest.readflags(m2n)
1592 mf2 = self.manifest.readflags(m2n)
1586 ma = self.manifest.read(man)
1593 ma = self.manifest.read(man)
1587 mfa = self.manifest.readflags(man)
1594 mfa = self.manifest.readflags(man)
1588
1595
1589 modified, added, removed, deleted, unknown = self.changes()
1596 modified, added, removed, deleted, unknown = self.changes()
1590
1597
1591 # is this a jump, or a merge? i.e. is there a linear path
1598 # is this a jump, or a merge? i.e. is there a linear path
1592 # from p1 to p2?
1599 # from p1 to p2?
1593 linear_path = (pa == p1 or pa == p2)
1600 linear_path = (pa == p1 or pa == p2)
1594
1601
1595 if allow and linear_path:
1602 if allow and linear_path:
1596 raise util.Abort(_("there is nothing to merge, "
1603 raise util.Abort(_("there is nothing to merge, "
1597 "just use 'hg update'"))
1604 "just use 'hg update'"))
1598 if allow and not forcemerge:
1605 if allow and not forcemerge:
1599 if modified or added or removed:
1606 if modified or added or removed:
1600 raise util.Abort(_("outstanding uncommitted changes"))
1607 raise util.Abort(_("outstanding uncommitted changes"))
1601
1608
1602 if not forcemerge and not force:
1609 if not forcemerge and not force:
1603 for f in unknown:
1610 for f in unknown:
1604 if f in m2:
1611 if f in m2:
1605 t1 = self.wread(f)
1612 t1 = self.wread(f)
1606 t2 = self.file(f).read(m2[f])
1613 t2 = self.file(f).read(m2[f])
1607 if cmp(t1, t2) != 0:
1614 if cmp(t1, t2) != 0:
1608 raise util.Abort(_("'%s' already exists in the working"
1615 raise util.Abort(_("'%s' already exists in the working"
1609 " dir and differs from remote") % f)
1616 " dir and differs from remote") % f)
1610
1617
1611 # resolve the manifest to determine which files
1618 # resolve the manifest to determine which files
1612 # we care about merging
1619 # we care about merging
1613 self.ui.note(_("resolving manifests\n"))
1620 self.ui.note(_("resolving manifests\n"))
1614 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1621 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1615 (force, allow, moddirstate, linear_path))
1622 (force, allow, moddirstate, linear_path))
1616 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1623 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1617 (short(man), short(m1n), short(m2n)))
1624 (short(man), short(m1n), short(m2n)))
1618
1625
1619 merge = {}
1626 merge = {}
1620 get = {}
1627 get = {}
1621 remove = []
1628 remove = []
1622
1629
1623 # construct a working dir manifest
1630 # construct a working dir manifest
1624 mw = m1.copy()
1631 mw = m1.copy()
1625 mfw = mf1.copy()
1632 mfw = mf1.copy()
1626 umap = dict.fromkeys(unknown)
1633 umap = dict.fromkeys(unknown)
1627
1634
1628 for f in added + modified + unknown:
1635 for f in added + modified + unknown:
1629 mw[f] = ""
1636 mw[f] = ""
1630 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1637 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1631
1638
1632 if moddirstate and not wlock:
1639 if moddirstate and not wlock:
1633 wlock = self.wlock()
1640 wlock = self.wlock()
1634
1641
1635 for f in deleted + removed:
1642 for f in deleted + removed:
1636 if f in mw:
1643 if f in mw:
1637 del mw[f]
1644 del mw[f]
1638
1645
1639 # If we're jumping between revisions (as opposed to merging),
1646 # If we're jumping between revisions (as opposed to merging),
1640 # and if neither the working directory nor the target rev has
1647 # and if neither the working directory nor the target rev has
1641 # the file, then we need to remove it from the dirstate, to
1648 # the file, then we need to remove it from the dirstate, to
1642 # prevent the dirstate from listing the file when it is no
1649 # prevent the dirstate from listing the file when it is no
1643 # longer in the manifest.
1650 # longer in the manifest.
1644 if moddirstate and linear_path and f not in m2:
1651 if moddirstate and linear_path and f not in m2:
1645 self.dirstate.forget((f,))
1652 self.dirstate.forget((f,))
1646
1653
1647 # Compare manifests
1654 # Compare manifests
1648 for f, n in mw.iteritems():
1655 for f, n in mw.iteritems():
1649 if choose and not choose(f):
1656 if choose and not choose(f):
1650 continue
1657 continue
1651 if f in m2:
1658 if f in m2:
1652 s = 0
1659 s = 0
1653
1660
1654 # is the wfile new since m1, and match m2?
1661 # is the wfile new since m1, and match m2?
1655 if f not in m1:
1662 if f not in m1:
1656 t1 = self.wread(f)
1663 t1 = self.wread(f)
1657 t2 = self.file(f).read(m2[f])
1664 t2 = self.file(f).read(m2[f])
1658 if cmp(t1, t2) == 0:
1665 if cmp(t1, t2) == 0:
1659 n = m2[f]
1666 n = m2[f]
1660 del t1, t2
1667 del t1, t2
1661
1668
1662 # are files different?
1669 # are files different?
1663 if n != m2[f]:
1670 if n != m2[f]:
1664 a = ma.get(f, nullid)
1671 a = ma.get(f, nullid)
1665 # are both different from the ancestor?
1672 # are both different from the ancestor?
1666 if n != a and m2[f] != a:
1673 if n != a and m2[f] != a:
1667 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1674 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1668 # merge executable bits
1675 # merge executable bits
1669 # "if we changed or they changed, change in merge"
1676 # "if we changed or they changed, change in merge"
1670 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1677 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1671 mode = ((a^b) | (a^c)) ^ a
1678 mode = ((a^b) | (a^c)) ^ a
1672 merge[f] = (m1.get(f, nullid), m2[f], mode)
1679 merge[f] = (m1.get(f, nullid), m2[f], mode)
1673 s = 1
1680 s = 1
1674 # are we clobbering?
1681 # are we clobbering?
1675 # is remote's version newer?
1682 # is remote's version newer?
1676 # or are we going back in time?
1683 # or are we going back in time?
1677 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1684 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1678 self.ui.debug(_(" remote %s is newer, get\n") % f)
1685 self.ui.debug(_(" remote %s is newer, get\n") % f)
1679 get[f] = m2[f]
1686 get[f] = m2[f]
1680 s = 1
1687 s = 1
1681 elif f in umap or f in added:
1688 elif f in umap or f in added:
1682 # this unknown file is the same as the checkout
1689 # this unknown file is the same as the checkout
1683 # we need to reset the dirstate if the file was added
1690 # we need to reset the dirstate if the file was added
1684 get[f] = m2[f]
1691 get[f] = m2[f]
1685
1692
1686 if not s and mfw[f] != mf2[f]:
1693 if not s and mfw[f] != mf2[f]:
1687 if force:
1694 if force:
1688 self.ui.debug(_(" updating permissions for %s\n") % f)
1695 self.ui.debug(_(" updating permissions for %s\n") % f)
1689 util.set_exec(self.wjoin(f), mf2[f])
1696 util.set_exec(self.wjoin(f), mf2[f])
1690 else:
1697 else:
1691 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1692 mode = ((a^b) | (a^c)) ^ a
1699 mode = ((a^b) | (a^c)) ^ a
1693 if mode != b:
1700 if mode != b:
1694 self.ui.debug(_(" updating permissions for %s\n")
1701 self.ui.debug(_(" updating permissions for %s\n")
1695 % f)
1702 % f)
1696 util.set_exec(self.wjoin(f), mode)
1703 util.set_exec(self.wjoin(f), mode)
1697 del m2[f]
1704 del m2[f]
1698 elif f in ma:
1705 elif f in ma:
1699 if n != ma[f]:
1706 if n != ma[f]:
1700 r = _("d")
1707 r = _("d")
1701 if not force and (linear_path or allow):
1708 if not force and (linear_path or allow):
1702 r = self.ui.prompt(
1709 r = self.ui.prompt(
1703 (_(" local changed %s which remote deleted\n") % f) +
1710 (_(" local changed %s which remote deleted\n") % f) +
1704 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1711 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1705 if r == _("d"):
1712 if r == _("d"):
1706 remove.append(f)
1713 remove.append(f)
1707 else:
1714 else:
1708 self.ui.debug(_("other deleted %s\n") % f)
1715 self.ui.debug(_("other deleted %s\n") % f)
1709 remove.append(f) # other deleted it
1716 remove.append(f) # other deleted it
1710 else:
1717 else:
1711 # file is created on branch or in working directory
1718 # file is created on branch or in working directory
1712 if force and f not in umap:
1719 if force and f not in umap:
1713 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1720 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1714 remove.append(f)
1721 remove.append(f)
1715 elif n == m1.get(f, nullid): # same as parent
1722 elif n == m1.get(f, nullid): # same as parent
1716 if p2 == pa: # going backwards?
1723 if p2 == pa: # going backwards?
1717 self.ui.debug(_("remote deleted %s\n") % f)
1724 self.ui.debug(_("remote deleted %s\n") % f)
1718 remove.append(f)
1725 remove.append(f)
1719 else:
1726 else:
1720 self.ui.debug(_("local modified %s, keeping\n") % f)
1727 self.ui.debug(_("local modified %s, keeping\n") % f)
1721 else:
1728 else:
1722 self.ui.debug(_("working dir created %s, keeping\n") % f)
1729 self.ui.debug(_("working dir created %s, keeping\n") % f)
1723
1730
1724 for f, n in m2.iteritems():
1731 for f, n in m2.iteritems():
1725 if choose and not choose(f):
1732 if choose and not choose(f):
1726 continue
1733 continue
1727 if f[0] == "/":
1734 if f[0] == "/":
1728 continue
1735 continue
1729 if f in ma and n != ma[f]:
1736 if f in ma and n != ma[f]:
1730 r = _("k")
1737 r = _("k")
1731 if not force and (linear_path or allow):
1738 if not force and (linear_path or allow):
1732 r = self.ui.prompt(
1739 r = self.ui.prompt(
1733 (_("remote changed %s which local deleted\n") % f) +
1740 (_("remote changed %s which local deleted\n") % f) +
1734 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1741 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1735 if r == _("k"):
1742 if r == _("k"):
1736 get[f] = n
1743 get[f] = n
1737 elif f not in ma:
1744 elif f not in ma:
1738 self.ui.debug(_("remote created %s\n") % f)
1745 self.ui.debug(_("remote created %s\n") % f)
1739 get[f] = n
1746 get[f] = n
1740 else:
1747 else:
1741 if force or p2 == pa: # going backwards?
1748 if force or p2 == pa: # going backwards?
1742 self.ui.debug(_("local deleted %s, recreating\n") % f)
1749 self.ui.debug(_("local deleted %s, recreating\n") % f)
1743 get[f] = n
1750 get[f] = n
1744 else:
1751 else:
1745 self.ui.debug(_("local deleted %s\n") % f)
1752 self.ui.debug(_("local deleted %s\n") % f)
1746
1753
1747 del mw, m1, m2, ma
1754 del mw, m1, m2, ma
1748
1755
1749 if force:
1756 if force:
1750 for f in merge:
1757 for f in merge:
1751 get[f] = merge[f][1]
1758 get[f] = merge[f][1]
1752 merge = {}
1759 merge = {}
1753
1760
1754 if linear_path or force:
1761 if linear_path or force:
1755 # we don't need to do any magic, just jump to the new rev
1762 # we don't need to do any magic, just jump to the new rev
1756 branch_merge = False
1763 branch_merge = False
1757 p1, p2 = p2, nullid
1764 p1, p2 = p2, nullid
1758 else:
1765 else:
1759 if not allow:
1766 if not allow:
1760 self.ui.status(_("this update spans a branch"
1767 self.ui.status(_("this update spans a branch"
1761 " affecting the following files:\n"))
1768 " affecting the following files:\n"))
1762 fl = merge.keys() + get.keys()
1769 fl = merge.keys() + get.keys()
1763 fl.sort()
1770 fl.sort()
1764 for f in fl:
1771 for f in fl:
1765 cf = ""
1772 cf = ""
1766 if f in merge:
1773 if f in merge:
1767 cf = _(" (resolve)")
1774 cf = _(" (resolve)")
1768 self.ui.status(" %s%s\n" % (f, cf))
1775 self.ui.status(" %s%s\n" % (f, cf))
1769 self.ui.warn(_("aborting update spanning branches!\n"))
1776 self.ui.warn(_("aborting update spanning branches!\n"))
1770 self.ui.status(_("(use 'hg merge' to merge across branches"
1777 self.ui.status(_("(use 'hg merge' to merge across branches"
1771 " or 'hg update -C' to lose changes)\n"))
1778 " or 'hg update -C' to lose changes)\n"))
1772 return 1
1779 return 1
1773 branch_merge = True
1780 branch_merge = True
1774
1781
1775 xp1 = hex(p1)
1782 xp1 = hex(p1)
1776 xp2 = hex(p2)
1783 xp2 = hex(p2)
1777 if p2 == nullid: xxp2 = ''
1784 if p2 == nullid: xxp2 = ''
1778 else: xxp2 = xp2
1785 else: xxp2 = xp2
1779
1786
1780 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1787 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1781
1788
1782 # get the files we don't need to change
1789 # get the files we don't need to change
1783 files = get.keys()
1790 files = get.keys()
1784 files.sort()
1791 files.sort()
1785 for f in files:
1792 for f in files:
1786 if f[0] == "/":
1793 if f[0] == "/":
1787 continue
1794 continue
1788 self.ui.note(_("getting %s\n") % f)
1795 self.ui.note(_("getting %s\n") % f)
1789 t = self.file(f).read(get[f])
1796 t = self.file(f).read(get[f])
1790 self.wwrite(f, t)
1797 self.wwrite(f, t)
1791 util.set_exec(self.wjoin(f), mf2[f])
1798 util.set_exec(self.wjoin(f), mf2[f])
1792 if moddirstate:
1799 if moddirstate:
1793 if branch_merge:
1800 if branch_merge:
1794 self.dirstate.update([f], 'n', st_mtime=-1)
1801 self.dirstate.update([f], 'n', st_mtime=-1)
1795 else:
1802 else:
1796 self.dirstate.update([f], 'n')
1803 self.dirstate.update([f], 'n')
1797
1804
1798 # merge the tricky bits
1805 # merge the tricky bits
1799 failedmerge = []
1806 failedmerge = []
1800 files = merge.keys()
1807 files = merge.keys()
1801 files.sort()
1808 files.sort()
1802 for f in files:
1809 for f in files:
1803 self.ui.status(_("merging %s\n") % f)
1810 self.ui.status(_("merging %s\n") % f)
1804 my, other, flag = merge[f]
1811 my, other, flag = merge[f]
1805 ret = self.merge3(f, my, other, xp1, xp2)
1812 ret = self.merge3(f, my, other, xp1, xp2)
1806 if ret:
1813 if ret:
1807 err = True
1814 err = True
1808 failedmerge.append(f)
1815 failedmerge.append(f)
1809 util.set_exec(self.wjoin(f), flag)
1816 util.set_exec(self.wjoin(f), flag)
1810 if moddirstate:
1817 if moddirstate:
1811 if branch_merge:
1818 if branch_merge:
1812 # We've done a branch merge, mark this file as merged
1819 # We've done a branch merge, mark this file as merged
1813 # so that we properly record the merger later
1820 # so that we properly record the merger later
1814 self.dirstate.update([f], 'm')
1821 self.dirstate.update([f], 'm')
1815 else:
1822 else:
1816 # We've update-merged a locally modified file, so
1823 # We've update-merged a locally modified file, so
1817 # we set the dirstate to emulate a normal checkout
1824 # we set the dirstate to emulate a normal checkout
1818 # of that file some time in the past. Thus our
1825 # of that file some time in the past. Thus our
1819 # merge will appear as a normal local file
1826 # merge will appear as a normal local file
1820 # modification.
1827 # modification.
1821 f_len = len(self.file(f).read(other))
1828 f_len = len(self.file(f).read(other))
1822 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1829 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1823
1830
1824 remove.sort()
1831 remove.sort()
1825 for f in remove:
1832 for f in remove:
1826 self.ui.note(_("removing %s\n") % f)
1833 self.ui.note(_("removing %s\n") % f)
1827 util.audit_path(f)
1834 util.audit_path(f)
1828 try:
1835 try:
1829 util.unlink(self.wjoin(f))
1836 util.unlink(self.wjoin(f))
1830 except OSError, inst:
1837 except OSError, inst:
1831 if inst.errno != errno.ENOENT:
1838 if inst.errno != errno.ENOENT:
1832 self.ui.warn(_("update failed to remove %s: %s!\n") %
1839 self.ui.warn(_("update failed to remove %s: %s!\n") %
1833 (f, inst.strerror))
1840 (f, inst.strerror))
1834 if moddirstate:
1841 if moddirstate:
1835 if branch_merge:
1842 if branch_merge:
1836 self.dirstate.update(remove, 'r')
1843 self.dirstate.update(remove, 'r')
1837 else:
1844 else:
1838 self.dirstate.forget(remove)
1845 self.dirstate.forget(remove)
1839
1846
1840 if moddirstate:
1847 if moddirstate:
1841 self.dirstate.setparents(p1, p2)
1848 self.dirstate.setparents(p1, p2)
1842
1849
1843 if show_stats:
1850 if show_stats:
1844 stats = ((len(get), _("updated")),
1851 stats = ((len(get), _("updated")),
1845 (len(merge) - len(failedmerge), _("merged")),
1852 (len(merge) - len(failedmerge), _("merged")),
1846 (len(remove), _("removed")),
1853 (len(remove), _("removed")),
1847 (len(failedmerge), _("unresolved")))
1854 (len(failedmerge), _("unresolved")))
1848 note = ", ".join([_("%d files %s") % s for s in stats])
1855 note = ", ".join([_("%d files %s") % s for s in stats])
1849 self.ui.status("%s\n" % note)
1856 self.ui.status("%s\n" % note)
1850 if moddirstate:
1857 if moddirstate:
1851 if branch_merge:
1858 if branch_merge:
1852 if failedmerge:
1859 if failedmerge:
1853 self.ui.status(_("There are unresolved merges,"
1860 self.ui.status(_("There are unresolved merges,"
1854 " you can redo the full merge using:\n"
1861 " you can redo the full merge using:\n"
1855 " hg update -C %s\n"
1862 " hg update -C %s\n"
1856 " hg merge %s\n"
1863 " hg merge %s\n"
1857 % (self.changelog.rev(p1),
1864 % (self.changelog.rev(p1),
1858 self.changelog.rev(p2))))
1865 self.changelog.rev(p2))))
1859 else:
1866 else:
1860 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1867 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1861 elif failedmerge:
1868 elif failedmerge:
1862 self.ui.status(_("There are unresolved merges with"
1869 self.ui.status(_("There are unresolved merges with"
1863 " locally modified files.\n"))
1870 " locally modified files.\n"))
1864
1871
1865 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1872 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1866 return err
1873 return err
1867
1874
1868 def merge3(self, fn, my, other, p1, p2):
1875 def merge3(self, fn, my, other, p1, p2):
1869 """perform a 3-way merge in the working directory"""
1876 """perform a 3-way merge in the working directory"""
1870
1877
1871 def temp(prefix, node):
1878 def temp(prefix, node):
1872 pre = "%s~%s." % (os.path.basename(fn), prefix)
1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1873 (fd, name) = tempfile.mkstemp(prefix=pre)
1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1874 f = os.fdopen(fd, "wb")
1881 f = os.fdopen(fd, "wb")
1875 self.wwrite(fn, fl.read(node), f)
1882 self.wwrite(fn, fl.read(node), f)
1876 f.close()
1883 f.close()
1877 return name
1884 return name
1878
1885
1879 fl = self.file(fn)
1886 fl = self.file(fn)
1880 base = fl.ancestor(my, other)
1887 base = fl.ancestor(my, other)
1881 a = self.wjoin(fn)
1888 a = self.wjoin(fn)
1882 b = temp("base", base)
1889 b = temp("base", base)
1883 c = temp("other", other)
1890 c = temp("other", other)
1884
1891
1885 self.ui.note(_("resolving %s\n") % fn)
1892 self.ui.note(_("resolving %s\n") % fn)
1886 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1887 (fn, short(my), short(other), short(base)))
1894 (fn, short(my), short(other), short(base)))
1888
1895
1889 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1890 or "hgmerge")
1897 or "hgmerge")
1891 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1892 environ={'HG_FILE': fn,
1899 environ={'HG_FILE': fn,
1893 'HG_MY_NODE': p1,
1900 'HG_MY_NODE': p1,
1894 'HG_OTHER_NODE': p2,
1901 'HG_OTHER_NODE': p2,
1895 'HG_FILE_MY_NODE': hex(my),
1902 'HG_FILE_MY_NODE': hex(my),
1896 'HG_FILE_OTHER_NODE': hex(other),
1903 'HG_FILE_OTHER_NODE': hex(other),
1897 'HG_FILE_BASE_NODE': hex(base)})
1904 'HG_FILE_BASE_NODE': hex(base)})
1898 if r:
1905 if r:
1899 self.ui.warn(_("merging %s failed!\n") % fn)
1906 self.ui.warn(_("merging %s failed!\n") % fn)
1900
1907
1901 os.unlink(b)
1908 os.unlink(b)
1902 os.unlink(c)
1909 os.unlink(c)
1903 return r
1910 return r
1904
1911
1905 def verify(self):
1912 def verify(self):
1906 filelinkrevs = {}
1913 filelinkrevs = {}
1907 filenodes = {}
1914 filenodes = {}
1908 changesets = revisions = files = 0
1915 changesets = revisions = files = 0
1909 errors = [0]
1916 errors = [0]
1910 warnings = [0]
1917 warnings = [0]
1911 neededmanifests = {}
1918 neededmanifests = {}
1912
1919
1913 def err(msg):
1920 def err(msg):
1914 self.ui.warn(msg + "\n")
1921 self.ui.warn(msg + "\n")
1915 errors[0] += 1
1922 errors[0] += 1
1916
1923
1917 def warn(msg):
1924 def warn(msg):
1918 self.ui.warn(msg + "\n")
1925 self.ui.warn(msg + "\n")
1919 warnings[0] += 1
1926 warnings[0] += 1
1920
1927
1921 def checksize(obj, name):
1928 def checksize(obj, name):
1922 d = obj.checksize()
1929 d = obj.checksize()
1923 if d[0]:
1930 if d[0]:
1924 err(_("%s data length off by %d bytes") % (name, d[0]))
1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1925 if d[1]:
1932 if d[1]:
1926 err(_("%s index contains %d extra bytes") % (name, d[1]))
1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1927
1934
1928 def checkversion(obj, name):
1935 def checkversion(obj, name):
1929 if obj.version != revlog.REVLOGV0:
1936 if obj.version != revlog.REVLOGV0:
1930 if not revlogv1:
1937 if not revlogv1:
1931 warn(_("warning: `%s' uses revlog format 1") % name)
1938 warn(_("warning: `%s' uses revlog format 1") % name)
1932 elif revlogv1:
1939 elif revlogv1:
1933 warn(_("warning: `%s' uses revlog format 0") % name)
1940 warn(_("warning: `%s' uses revlog format 0") % name)
1934
1941
1935 revlogv1 = self.revlogversion != revlog.REVLOGV0
1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1936 if self.ui.verbose or revlogv1 != self.revlogv1:
1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1937 self.ui.status(_("repository uses revlog format %d\n") %
1944 self.ui.status(_("repository uses revlog format %d\n") %
1938 (revlogv1 and 1 or 0))
1945 (revlogv1 and 1 or 0))
1939
1946
1940 seen = {}
1947 seen = {}
1941 self.ui.status(_("checking changesets\n"))
1948 self.ui.status(_("checking changesets\n"))
1942 checksize(self.changelog, "changelog")
1949 checksize(self.changelog, "changelog")
1943
1950
1944 for i in range(self.changelog.count()):
1951 for i in range(self.changelog.count()):
1945 changesets += 1
1952 changesets += 1
1946 n = self.changelog.node(i)
1953 n = self.changelog.node(i)
1947 l = self.changelog.linkrev(n)
1954 l = self.changelog.linkrev(n)
1948 if l != i:
1955 if l != i:
1949 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1950 if n in seen:
1957 if n in seen:
1951 err(_("duplicate changeset at revision %d") % i)
1958 err(_("duplicate changeset at revision %d") % i)
1952 seen[n] = 1
1959 seen[n] = 1
1953
1960
1954 for p in self.changelog.parents(n):
1961 for p in self.changelog.parents(n):
1955 if p not in self.changelog.nodemap:
1962 if p not in self.changelog.nodemap:
1956 err(_("changeset %s has unknown parent %s") %
1963 err(_("changeset %s has unknown parent %s") %
1957 (short(n), short(p)))
1964 (short(n), short(p)))
1958 try:
1965 try:
1959 changes = self.changelog.read(n)
1966 changes = self.changelog.read(n)
1960 except KeyboardInterrupt:
1967 except KeyboardInterrupt:
1961 self.ui.warn(_("interrupted"))
1968 self.ui.warn(_("interrupted"))
1962 raise
1969 raise
1963 except Exception, inst:
1970 except Exception, inst:
1964 err(_("unpacking changeset %s: %s") % (short(n), inst))
1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1965 continue
1972 continue
1966
1973
1967 neededmanifests[changes[0]] = n
1974 neededmanifests[changes[0]] = n
1968
1975
1969 for f in changes[3]:
1976 for f in changes[3]:
1970 filelinkrevs.setdefault(f, []).append(i)
1977 filelinkrevs.setdefault(f, []).append(i)
1971
1978
1972 seen = {}
1979 seen = {}
1973 self.ui.status(_("checking manifests\n"))
1980 self.ui.status(_("checking manifests\n"))
1974 checkversion(self.manifest, "manifest")
1981 checkversion(self.manifest, "manifest")
1975 checksize(self.manifest, "manifest")
1982 checksize(self.manifest, "manifest")
1976
1983
1977 for i in range(self.manifest.count()):
1984 for i in range(self.manifest.count()):
1978 n = self.manifest.node(i)
1985 n = self.manifest.node(i)
1979 l = self.manifest.linkrev(n)
1986 l = self.manifest.linkrev(n)
1980
1987
1981 if l < 0 or l >= self.changelog.count():
1988 if l < 0 or l >= self.changelog.count():
1982 err(_("bad manifest link (%d) at revision %d") % (l, i))
1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
1983
1990
1984 if n in neededmanifests:
1991 if n in neededmanifests:
1985 del neededmanifests[n]
1992 del neededmanifests[n]
1986
1993
1987 if n in seen:
1994 if n in seen:
1988 err(_("duplicate manifest at revision %d") % i)
1995 err(_("duplicate manifest at revision %d") % i)
1989
1996
1990 seen[n] = 1
1997 seen[n] = 1
1991
1998
1992 for p in self.manifest.parents(n):
1999 for p in self.manifest.parents(n):
1993 if p not in self.manifest.nodemap:
2000 if p not in self.manifest.nodemap:
1994 err(_("manifest %s has unknown parent %s") %
2001 err(_("manifest %s has unknown parent %s") %
1995 (short(n), short(p)))
2002 (short(n), short(p)))
1996
2003
1997 try:
2004 try:
1998 delta = mdiff.patchtext(self.manifest.delta(n))
2005 delta = mdiff.patchtext(self.manifest.delta(n))
1999 except KeyboardInterrupt:
2006 except KeyboardInterrupt:
2000 self.ui.warn(_("interrupted"))
2007 self.ui.warn(_("interrupted"))
2001 raise
2008 raise
2002 except Exception, inst:
2009 except Exception, inst:
2003 err(_("unpacking manifest %s: %s") % (short(n), inst))
2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2004 continue
2011 continue
2005
2012
2006 try:
2013 try:
2007 ff = [ l.split('\0') for l in delta.splitlines() ]
2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2008 for f, fn in ff:
2015 for f, fn in ff:
2009 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2010 except (ValueError, TypeError), inst:
2017 except (ValueError, TypeError), inst:
2011 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2012
2019
2013 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2014
2021
2015 for m, c in neededmanifests.items():
2022 for m, c in neededmanifests.items():
2016 err(_("Changeset %s refers to unknown manifest %s") %
2023 err(_("Changeset %s refers to unknown manifest %s") %
2017 (short(m), short(c)))
2024 (short(m), short(c)))
2018 del neededmanifests
2025 del neededmanifests
2019
2026
2020 for f in filenodes:
2027 for f in filenodes:
2021 if f not in filelinkrevs:
2028 if f not in filelinkrevs:
2022 err(_("file %s in manifest but not in changesets") % f)
2029 err(_("file %s in manifest but not in changesets") % f)
2023
2030
2024 for f in filelinkrevs:
2031 for f in filelinkrevs:
2025 if f not in filenodes:
2032 if f not in filenodes:
2026 err(_("file %s in changeset but not in manifest") % f)
2033 err(_("file %s in changeset but not in manifest") % f)
2027
2034
2028 self.ui.status(_("checking files\n"))
2035 self.ui.status(_("checking files\n"))
2029 ff = filenodes.keys()
2036 ff = filenodes.keys()
2030 ff.sort()
2037 ff.sort()
2031 for f in ff:
2038 for f in ff:
2032 if f == "/dev/null":
2039 if f == "/dev/null":
2033 continue
2040 continue
2034 files += 1
2041 files += 1
2035 if not f:
2042 if not f:
2036 err(_("file without name in manifest %s") % short(n))
2043 err(_("file without name in manifest %s") % short(n))
2037 continue
2044 continue
2038 fl = self.file(f)
2045 fl = self.file(f)
2039 checkversion(fl, f)
2046 checkversion(fl, f)
2040 checksize(fl, f)
2047 checksize(fl, f)
2041
2048
2042 nodes = {nullid: 1}
2049 nodes = {nullid: 1}
2043 seen = {}
2050 seen = {}
2044 for i in range(fl.count()):
2051 for i in range(fl.count()):
2045 revisions += 1
2052 revisions += 1
2046 n = fl.node(i)
2053 n = fl.node(i)
2047
2054
2048 if n in seen:
2055 if n in seen:
2049 err(_("%s: duplicate revision %d") % (f, i))
2056 err(_("%s: duplicate revision %d") % (f, i))
2050 if n not in filenodes[f]:
2057 if n not in filenodes[f]:
2051 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2052 else:
2059 else:
2053 del filenodes[f][n]
2060 del filenodes[f][n]
2054
2061
2055 flr = fl.linkrev(n)
2062 flr = fl.linkrev(n)
2056 if flr not in filelinkrevs.get(f, []):
2063 if flr not in filelinkrevs.get(f, []):
2057 err(_("%s:%s points to unexpected changeset %d")
2064 err(_("%s:%s points to unexpected changeset %d")
2058 % (f, short(n), flr))
2065 % (f, short(n), flr))
2059 else:
2066 else:
2060 filelinkrevs[f].remove(flr)
2067 filelinkrevs[f].remove(flr)
2061
2068
2062 # verify contents
2069 # verify contents
2063 try:
2070 try:
2064 t = fl.read(n)
2071 t = fl.read(n)
2065 except KeyboardInterrupt:
2072 except KeyboardInterrupt:
2066 self.ui.warn(_("interrupted"))
2073 self.ui.warn(_("interrupted"))
2067 raise
2074 raise
2068 except Exception, inst:
2075 except Exception, inst:
2069 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2070
2077
2071 # verify parents
2078 # verify parents
2072 (p1, p2) = fl.parents(n)
2079 (p1, p2) = fl.parents(n)
2073 if p1 not in nodes:
2080 if p1 not in nodes:
2074 err(_("file %s:%s unknown parent 1 %s") %
2081 err(_("file %s:%s unknown parent 1 %s") %
2075 (f, short(n), short(p1)))
2082 (f, short(n), short(p1)))
2076 if p2 not in nodes:
2083 if p2 not in nodes:
2077 err(_("file %s:%s unknown parent 2 %s") %
2084 err(_("file %s:%s unknown parent 2 %s") %
2078 (f, short(n), short(p1)))
2085 (f, short(n), short(p1)))
2079 nodes[n] = 1
2086 nodes[n] = 1
2080
2087
2081 # cross-check
2088 # cross-check
2082 for node in filenodes[f]:
2089 for node in filenodes[f]:
2083 err(_("node %s in manifests not in %s") % (hex(node), f))
2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2084
2091
2085 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2086 (files, changesets, revisions))
2093 (files, changesets, revisions))
2087
2094
2088 if warnings[0]:
2095 if warnings[0]:
2089 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2090 if errors[0]:
2097 if errors[0]:
2091 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2092 return 1
2099 return 1
2093
2100
2094 # used to avoid circular references so destructors work
2101 # used to avoid circular references so destructors work
2095 def aftertrans(base):
2102 def aftertrans(base):
2096 p = base
2103 p = base
2097 def a():
2104 def a():
2098 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2105 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2099 util.rename(os.path.join(p, "journal.dirstate"),
2106 util.rename(os.path.join(p, "journal.dirstate"),
2100 os.path.join(p, "undo.dirstate"))
2107 os.path.join(p, "undo.dirstate"))
2101 return a
2108 return a
2102
2109
@@ -1,176 +1,189
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct
8 import struct
9 from revlog import *
9 from revlog import *
10 from i18n import gettext as _
10 from i18n import gettext as _
11 from demandload import *
11 from demandload import *
12 demandload(globals(), "bisect array")
12 demandload(globals(), "bisect array")
13
13
14 class manifest(revlog):
14 class manifest(revlog):
15 def __init__(self, opener, defversion=REVLOGV0):
15 def __init__(self, opener, defversion=REVLOGV0):
16 self.mapcache = None
16 self.mapcache = None
17 self.listcache = None
17 self.listcache = None
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
19 defversion)
19 defversion)
20
20
21 def read(self, node):
21 def read(self, node):
22 if node == nullid: return {} # don't upset local cache
22 if node == nullid: return {} # don't upset local cache
23 if self.mapcache and self.mapcache[0] == node:
23 if self.mapcache and self.mapcache[0] == node:
24 return self.mapcache[1]
24 return self.mapcache[1]
25 text = self.revision(node)
25 text = self.revision(node)
26 map = {}
26 map = {}
27 flag = {}
27 flag = {}
28 self.listcache = array.array('c', text)
28 self.listcache = array.array('c', text)
29 lines = text.splitlines(1)
29 lines = text.splitlines(1)
30 for l in lines:
30 for l in lines:
31 (f, n) = l.split('\0')
31 (f, n) = l.split('\0')
32 map[f] = bin(n[:40])
32 map[f] = bin(n[:40])
33 flag[f] = (n[40:-1] == "x")
33 flag[f] = (n[40:-1] == "x")
34 self.mapcache = (node, map, flag)
34 self.mapcache = (node, map, flag)
35 return map
35 return map
36
36
37 def readflags(self, node):
37 def readflags(self, node):
38 if node == nullid: return {} # don't upset local cache
38 if node == nullid: return {} # don't upset local cache
39 if not self.mapcache or self.mapcache[0] != node:
39 if not self.mapcache or self.mapcache[0] != node:
40 self.read(node)
40 self.read(node)
41 return self.mapcache[2]
41 return self.mapcache[2]
42
42
43 def diff(self, a, b):
43 def diff(self, a, b):
44 return mdiff.textdiff(str(a), str(b))
44 return mdiff.textdiff(str(a), str(b))
45
45
46 def _search(self, m, s, lo=0, hi=None):
47 '''return a tuple (start, end) that says where to find s within m.
48
49 If the string is found m[start:end] are the line containing
50 that string. If start == end the string was not found and
51 they indicate the proper sorted insertion point. This was
52 taken from bisect_left, and modified to find line start/end as
53 it goes along.
54
55 m should be a buffer or a string
56 s is a string'''
57 def advance(i, c):
58 while i < lenm and m[i] != c:
59 i += 1
60 return i
61 lenm = len(m)
62 if not hi:
63 hi = lenm
64 while lo < hi:
65 mid = (lo + hi) // 2
66 start = mid
67 while start > 0 and m[start-1] != '\n':
68 start -= 1
69 end = advance(start, '\0')
70 if m[start:end] < s:
71 # we know that after the null there are 40 bytes of sha1
72 # this translates to the bisect lo = mid + 1
73 lo = advance(end + 40, '\n') + 1
74 else:
75 # this translates to the bisect hi = mid
76 hi = start
77 end = advance(lo, '\0')
78 found = m[lo:end]
79 if cmp(s, found) == 0:
80 # we know that after the null there are 40 bytes of sha1
81 end = advance(end + 40, '\n')
82 return (lo, end+1)
83 else:
84 return (lo, lo)
85
86 def find(self, node, f):
87 '''look up entry for a single file efficiently.
88 return (node, flag) pair if found, (None, None) if not.'''
89 if self.mapcache and node == self.mapcache[0]:
90 return self.mapcache[1].get(f), self.mapcache[2].get(f)
91 text = self.revision(node)
92 start, end = self._search(text, f)
93 if start == end:
94 return None, None
95 l = text[start:end]
96 f, n = l.split('\0')
97 return bin(n[:40]), n[40:-1] == 'x'
98
46 def add(self, map, flags, transaction, link, p1=None, p2=None,
99 def add(self, map, flags, transaction, link, p1=None, p2=None,
47 changed=None):
100 changed=None):
48
49 # returns a tuple (start, end). If the string is found
50 # m[start:end] are the line containing that string. If start == end
51 # the string was not found and they indicate the proper sorted
52 # insertion point. This was taken from bisect_left, and modified
53 # to find line start/end as it goes along.
54 #
55 # m should be a buffer or a string
56 # s is a string
57 #
58 def manifestsearch(m, s, lo=0, hi=None):
59 def advance(i, c):
60 while i < lenm and m[i] != c:
61 i += 1
62 return i
63 lenm = len(m)
64 if not hi:
65 hi = lenm
66 while lo < hi:
67 mid = (lo + hi) // 2
68 start = mid
69 while start > 0 and m[start-1] != '\n':
70 start -= 1
71 end = advance(start, '\0')
72 if m[start:end] < s:
73 # we know that after the null there are 40 bytes of sha1
74 # this translates to the bisect lo = mid + 1
75 lo = advance(end + 40, '\n') + 1
76 else:
77 # this translates to the bisect hi = mid
78 hi = start
79 end = advance(lo, '\0')
80 found = m[lo:end]
81 if cmp(s, found) == 0:
82 # we know that after the null there are 40 bytes of sha1
83 end = advance(end + 40, '\n')
84 return (lo, end+1)
85 else:
86 return (lo, lo)
87
88 # apply the changes collected during the bisect loop to our addlist
101 # apply the changes collected during the bisect loop to our addlist
89 # return a delta suitable for addrevision
102 # return a delta suitable for addrevision
90 def addlistdelta(addlist, x):
103 def addlistdelta(addlist, x):
91 # start from the bottom up
104 # start from the bottom up
92 # so changes to the offsets don't mess things up.
105 # so changes to the offsets don't mess things up.
93 i = len(x)
106 i = len(x)
94 while i > 0:
107 while i > 0:
95 i -= 1
108 i -= 1
96 start = x[i][0]
109 start = x[i][0]
97 end = x[i][1]
110 end = x[i][1]
98 if x[i][2]:
111 if x[i][2]:
99 addlist[start:end] = array.array('c', x[i][2])
112 addlist[start:end] = array.array('c', x[i][2])
100 else:
113 else:
101 del addlist[start:end]
114 del addlist[start:end]
102 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
115 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
103 for d in x ])
116 for d in x ])
104
117
105 # if we're using the listcache, make sure it is valid and
118 # if we're using the listcache, make sure it is valid and
106 # parented by the same node we're diffing against
119 # parented by the same node we're diffing against
107 if not changed or not self.listcache or not p1 or \
120 if not changed or not self.listcache or not p1 or \
108 self.mapcache[0] != p1:
121 self.mapcache[0] != p1:
109 files = map.keys()
122 files = map.keys()
110 files.sort()
123 files.sort()
111
124
112 # if this is changed to support newlines in filenames,
125 # if this is changed to support newlines in filenames,
113 # be sure to check the templates/ dir again (especially *-raw.tmpl)
126 # be sure to check the templates/ dir again (especially *-raw.tmpl)
114 text = ["%s\000%s%s\n" %
127 text = ["%s\000%s%s\n" %
115 (f, hex(map[f]), flags[f] and "x" or '')
128 (f, hex(map[f]), flags[f] and "x" or '')
116 for f in files]
129 for f in files]
117 self.listcache = array.array('c', "".join(text))
130 self.listcache = array.array('c', "".join(text))
118 cachedelta = None
131 cachedelta = None
119 else:
132 else:
120 addlist = self.listcache
133 addlist = self.listcache
121
134
122 # combine the changed lists into one list for sorting
135 # combine the changed lists into one list for sorting
123 work = [[x, 0] for x in changed[0]]
136 work = [[x, 0] for x in changed[0]]
124 work[len(work):] = [[x, 1] for x in changed[1]]
137 work[len(work):] = [[x, 1] for x in changed[1]]
125 work.sort()
138 work.sort()
126
139
127 delta = []
140 delta = []
128 dstart = None
141 dstart = None
129 dend = None
142 dend = None
130 dline = [""]
143 dline = [""]
131 start = 0
144 start = 0
132 # zero copy representation of addlist as a buffer
145 # zero copy representation of addlist as a buffer
133 addbuf = buffer(addlist)
146 addbuf = buffer(addlist)
134
147
135 # start with a readonly loop that finds the offset of
148 # start with a readonly loop that finds the offset of
136 # each line and creates the deltas
149 # each line and creates the deltas
137 for w in work:
150 for w in work:
138 f = w[0]
151 f = w[0]
139 # bs will either be the index of the item or the insert point
152 # bs will either be the index of the item or the insert point
140 start, end = manifestsearch(addbuf, f, start)
153 start, end = self._search(addbuf, f, start)
141 if w[1] == 0:
154 if w[1] == 0:
142 l = "%s\000%s%s\n" % (f, hex(map[f]),
155 l = "%s\000%s%s\n" % (f, hex(map[f]),
143 flags[f] and "x" or '')
156 flags[f] and "x" or '')
144 else:
157 else:
145 l = ""
158 l = ""
146 if start == end and w[1] == 1:
159 if start == end and w[1] == 1:
147 # item we want to delete was not found, error out
160 # item we want to delete was not found, error out
148 raise AssertionError(
161 raise AssertionError(
149 _("failed to remove %s from manifest\n") % f)
162 _("failed to remove %s from manifest\n") % f)
150 if dstart != None and dstart <= start and dend >= start:
163 if dstart != None and dstart <= start and dend >= start:
151 if dend < end:
164 if dend < end:
152 dend = end
165 dend = end
153 if l:
166 if l:
154 dline.append(l)
167 dline.append(l)
155 else:
168 else:
156 if dstart != None:
169 if dstart != None:
157 delta.append([dstart, dend, "".join(dline)])
170 delta.append([dstart, dend, "".join(dline)])
158 dstart = start
171 dstart = start
159 dend = end
172 dend = end
160 dline = [l]
173 dline = [l]
161
174
162 if dstart != None:
175 if dstart != None:
163 delta.append([dstart, dend, "".join(dline)])
176 delta.append([dstart, dend, "".join(dline)])
164 # apply the delta to the addlist, and get a delta for addrevision
177 # apply the delta to the addlist, and get a delta for addrevision
165 cachedelta = addlistdelta(addlist, delta)
178 cachedelta = addlistdelta(addlist, delta)
166
179
167 # the delta is only valid if we've been processing the tip revision
180 # the delta is only valid if we've been processing the tip revision
168 if self.mapcache[0] != self.tip():
181 if self.mapcache[0] != self.tip():
169 cachedelta = None
182 cachedelta = None
170 self.listcache = addlist
183 self.listcache = addlist
171
184
172 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
185 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
173 p2, cachedelta)
186 p2, cachedelta)
174 self.mapcache = (n, map, flags)
187 self.mapcache = (n, map, flags)
175
188
176 return n
189 return n
@@ -1,43 +1,62
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 hg id
6 hg id
7 echo a > a
7 echo a > a
8 hg add a
8 hg add a
9 hg commit -m "test" -d "1000000 0"
9 hg commit -m "test" -d "1000000 0"
10 hg co
10 hg co
11 hg identify
11 hg identify
12 T=`hg tip -v | head -n 1 | cut -d : -f 3`
12 T=`hg tip -v | head -n 1 | cut -d : -f 3`
13 echo "$T first" > .hgtags
13 echo "$T first" > .hgtags
14 cat .hgtags
14 cat .hgtags
15 hg add .hgtags
15 hg add .hgtags
16 hg commit -m "add tags" -d "1000000 0"
16 hg commit -m "add tags" -d "1000000 0"
17 hg tags
17 hg tags
18 hg identify
18 hg identify
19 echo bb > a
19 echo bb > a
20 hg status
20 hg status
21 hg identify
21 hg identify
22 hg co first
22 hg co first
23 hg id
23 hg id
24 hg -v id
24 hg -v id
25 hg status
25 hg status
26 echo 1 > b
26 echo 1 > b
27 hg add b
27 hg add b
28 hg commit -m "branch" -d "1000000 0"
28 hg commit -m "branch" -d "1000000 0"
29 hg id
29 hg id
30 hg merge 1
30 hg merge 1
31 hg id
31 hg id
32 hg status
32 hg status
33
33
34 hg commit -m "merge" -d "1000000 0"
34 hg commit -m "merge" -d "1000000 0"
35
36 # create fake head, make sure tag not visible afterwards
37 cp .hgtags tags
38 hg tag -d "1000000 0" last
39 hg rm .hgtags
40 hg commit -m "remove" -d "1000000 0"
41
42 mv tags .hgtags
43 hg add .hgtags
44 hg commit -m "readd" -d "1000000 0"
45
46 hg tags
47
35 # invalid tags
48 # invalid tags
36 echo "spam" >> .hgtags
49 echo "spam" >> .hgtags
37 echo >> .hgtags
50 echo >> .hgtags
38 echo "foo bar" >> .hgtags
51 echo "foo bar" >> .hgtags
39 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
52 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
40 hg commit -m "tags" -d "1000000 0"
53 hg commit -m "tags" -d "1000000 0"
54
55 # report tag parse error on other head
56 hg up 3
57 echo 'x y' >> .hgtags
58 hg commit -m "head" -d "1000000 0"
59
41 hg tags
60 hg tags
42 hg tip
61 hg tip
43
62
@@ -1,32 +1,41
1 unknown
1 unknown
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
3 0acdaf898367 tip
4 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
4 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
5 tip 1:8a3ca90d111dc784e6575d373105be12570e8776
5 tip 1:8a3ca90d111dc784e6575d373105be12570e8776
6 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
6 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
7 8a3ca90d111d tip
7 8a3ca90d111d tip
8 M a
8 M a
9 8a3ca90d111d+ tip
9 8a3ca90d111d+ tip
10 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
10 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
11 0acdaf898367+ first
11 0acdaf898367+ first
12 0acdaf8983679e0aac16e811534eb49d7ee1f2b4+ first
12 0acdaf8983679e0aac16e811534eb49d7ee1f2b4+ first
13 M a
13 M a
14 8216907a933d tip
14 8216907a933d tip
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 (branch merge, don't forget to commit)
16 (branch merge, don't forget to commit)
17 8216907a933d+8a3ca90d111d+ tip
17 8216907a933d+8a3ca90d111d+ tip
18 M .hgtags
18 M .hgtags
19 .hgtags:2: ignoring invalid tag
19 tip 6:c6af9d771a81bb9c7f267ec03491224a9f8ba1cd
20 .hgtags:4: ignoring invalid tag
21 localtags:1: ignoring invalid tag
22 tip 4:fd868a874787a7b5af31e1675666ce691c803035
23 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
20 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
24 changeset: 4:fd868a874787
21 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
25 .hgtags:2: ignoring invalid tag
22 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
26 .hgtags:4: ignoring invalid tag
23 localtags, line 1: tag 'invalid' refers to unknown node
27 localtags:1: ignoring invalid tag
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
26 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
27 .hgtags (rev 8:4ca6f1b1a68c), line 2: node 'x' is not well formed
28 localtags, line 1: tag 'invalid' refers to unknown node
29 tip 8:4ca6f1b1a68c77be687a03aaeb1614671ba59b20
30 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
31 changeset: 8:4ca6f1b1a68c
32 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
33 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
34 .hgtags (rev 8:4ca6f1b1a68c), line 2: node 'x' is not well formed
35 localtags, line 1: tag 'invalid' refers to unknown node
28 tag: tip
36 tag: tip
37 parent: 3:b2ef3841386b
29 user: test
38 user: test
30 date: Mon Jan 12 13:46:40 1970 +0000
39 date: Mon Jan 12 13:46:40 1970 +0000
31 summary: tags
40 summary: head
32
41
General Comments 0
You need to be logged in to leave comments. Login now