fix calculation of new heads added during push with -r...
Benoit Boissinot
r3923:27230c29 0.9.3 default
@@ -1,1971 +1,1971 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import gettext as _
from demandload import *
import repo
demandload(globals(), "appendfile changegroup")
demandload(globals(), "changelog dirstate filelog manifest context")
demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
demandload(globals(), "os revlog time util")

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')
    supported = ('revlogv1', 'store')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
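    # Illustrative sketch (not part of the original module): how hook()
    # above dispatches an in-process hook. The hgrc entry and the module
    # "myhooks" are hypothetical; the call signature and the "a true
    # return value means failure" convention come from callhook() above.
    #
    #   [hooks]
    #   pretxncommit.check = python:myhooks.check
    #
    #   # myhooks.py
    #   def check(ui, repo, hooktype, **kwargs):
    #       ui.note("%s hook in %s\n" % (hooktype, repo.root))
    #       return False        # false/zero: hook passed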

    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
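    # Illustrative note: each committed tag appended by tag() above is a
    # single .hgtags line, "<hex changeset node> <tag name>"; the node
    # below is made up:
    #   0123456789abcdef0123456789abcdef01234567 v1.0
    # .hg/localtags uses the same layout, but in the local charset.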

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = line.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: cannot parse entry\n") % context)
                    return
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                 (context, node))
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                 (context, key))
                    return
                self.tagscache[key] = bin_n

            # read the tags file from each head, ending with the tip,
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            f = None
            for rev, node, fnode in self._hgtagsnodes():
                f = (f and f.filectx(fnode) or
                     self.filectx('.hgtags', fileid=fnode))
                count = 0
                for l in f.data().splitlines():
                    count += 1
                    parsetag(l, _("%s, line %d") % (str(f), count))

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    # localtags are stored in the local character set
                    # while the internal tag table is stored in UTF-8
                    l = util.fromlocal(l)
                    count += 1
                    parsetag(l, _("localtags, line %d") % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
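    # Illustrative note: the branches.cache format handled above is a
    # header line "<tip hex> <tip rev>" (used as a sanity check on read),
    # then one "<node hex> <branch label>" line per branch, e.g.
    # (made-up values):
    #   5f39...c2e1 3923
    #   5f39...c2e1 default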

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
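    # Illustrative summary of the resolution order in lookup() above:
    # '.' and 'null' first, then an exact changelog match, then tags(),
    # then branchtags(), then a unique node-prefix match. With a
    # hypothetical repo object:
    #   repo.lookup('tip')       # e.g. via the tags table
    #   repo.lookup('27230c29')  # unambiguous prefix, via _partialmatch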

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
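    # Illustrative note: wread()/wwrite() above apply [encode]/[decode]
    # filters from hgrc, mapping file patterns to shell commands, e.g.
    # (a common example; the commands are placeholders):
    #   [encode]
    #   *.gz = gunzip
    #   [decode]
    #   *.gz = gzip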

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
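    # Illustrative note: for a copied file, filecommit() above stores
    # metadata shaped like {"copy": "<source path>", "copyrev": "<hex of
    # the source file revision>"} alongside the new filelog entry.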

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
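    # Illustrative sketch, with a hypothetical repo object: a minimal
    # programmatic commit through the method above. It returns the new
    # changeset node, or None if nothing changed.
    #   node = repo.commit(files=['a.txt'], text='fix a.txt',
    #                      user='user@example.com')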

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
          'f' the file was found in the directory tree
          'm' the file was only in the dirstate and not in the tree
          'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
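    # Illustrative sketch: consuming walk() above for the working
    # directory (node=None), with a hypothetical repo object:
    #   for src, fn in repo.walk():
    #       if src == 'f':      # found in the directory tree
    #           print fn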

    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
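    # Illustrative sketch of unpacking the 7-tuple returned above; the
    # ignored and clean lists stay empty unless the matching list_*
    # flags are passed:
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)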

    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]
995
995
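# Editor's sketch (not part of the original file): heads() uses the
# decorate-sort-undecorate idiom; negating the revision number makes a
# plain ascending sort yield newest-first order. For hypothetical head
# nodes n1 (rev 3) and n2 (rev 5):
#
#   decorated = [(-3, n1), (-5, n2)]
#   decorated.sort()             # -> [(-5, n2), (-3, n1)]
#   [n for (r, n) in decorated]  # -> [n2, n1], descending by revision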
996 # branchlookup returns a dict giving a list of branches for
997 # each head. A branch is defined as the tag of a node or
998 # the branch of the node's parents. If a node has multiple
999 # branch tags, tags are eliminated if they are visible from other
1000 # branch tags.
1001 #
1002 # So, for this graph:  a->b->c->d->e
1003 #                       \         /
1004 #                        aa -----/
1005 # a has tag 2.6.12
1006 # d has tag 2.6.13
1007 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1008 # for 2.6.12 can be reached from the node for 2.6.13, 2.6.12 is
1009 # eliminated from the list.
1010 #
1011 # It is possible that more than one head will have the same branch tag.
1012 # Callers need to check the result for multiple heads under the same
1013 # branch tag if that is a problem for them (i.e. checkout of a
1014 # specific branch).
1015 #
1016 # Passing in a specific branch will limit the depth of the search
1017 # through the parents. It won't limit the branches returned in the
1018 # result though.
1019 def branchlookup(self, heads=None, branch=None):
1020 if not heads:
1021 heads = self.heads()
1022 headt = [ h for h in heads ]
1023 chlog = self.changelog
1024 branches = {}
1025 merges = []
1026 seenmerge = {}
1027
1028 # traverse the tree once for each head, recording in the branches
1029 # dict which tags are visible from this head. The branches
1030 # dict also records which tags are visible from each tag
1031 # while we traverse.
1032 while headt or merges:
1033 if merges:
1034 n, found = merges.pop()
1035 visit = [n]
1036 else:
1037 h = headt.pop()
1038 visit = [h]
1039 found = [h]
1040 seen = {}
1041 while visit:
1042 n = visit.pop()
1043 if n in seen:
1044 continue
1045 pp = chlog.parents(n)
1046 tags = self.nodetags(n)
1047 if tags:
1048 for x in tags:
1049 if x == 'tip':
1050 continue
1051 for f in found:
1052 branches.setdefault(f, {})[n] = 1
1053 branches.setdefault(n, {})[n] = 1
1054 break
1055 if n not in found:
1056 found.append(n)
1057 if branch in tags:
1058 continue
1059 seen[n] = 1
1060 if pp[1] != nullid and n not in seenmerge:
1061 merges.append((pp[1], [x for x in found]))
1062 seenmerge[n] = 1
1063 if pp[0] != nullid:
1064 visit.append(pp[0])
1065 # traverse the branches dict, eliminating branch tags from each
1066 # head that are visible from another branch tag for that head.
1067 out = {}
1068 viscache = {}
1069 for h in heads:
1070 def visible(node):
1071 if node in viscache:
1072 return viscache[node]
1073 ret = {}
1074 visit = [node]
1075 while visit:
1076 x = visit.pop()
1077 if x in viscache:
1078 ret.update(viscache[x])
1079 elif x not in ret:
1080 ret[x] = 1
1081 if x in branches:
1082 visit[len(visit):] = branches[x].keys()
1083 viscache[node] = ret
1084 return ret
1085 if h not in branches:
1086 continue
1087 # O(n^2), but somewhat limited. This only searches the
1088 # tags visible from a specific head, not all the tags in the
1089 # whole repo.
1090 for b in branches[h]:
1091 vis = False
1092 for bb in branches[h].keys():
1093 if b != bb:
1094 if b in visible(bb):
1095 vis = True
1096 break
1097 if not vis:
1098 l = out.setdefault(h, [])
1099 l[len(l):] = self.nodetags(b)
1100 return out
1101
1102 def branches(self, nodes):
1103 if not nodes:
1104 nodes = [self.changelog.tip()]
1105 b = []
1106 for n in nodes:
1107 t = n
1108 while 1:
1109 p = self.changelog.parents(n)
1110 if p[1] != nullid or p[0] == nullid:
1111 b.append((t, n, p[0], p[1]))
1112 break
1113 n = p[0]
1114 return b
1115
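# Editor's note (not part of the original file): each tuple appended in
# branches() above is (tipmost, bottom, p1, p2): a linear segment found by
# walking first parents from 'tipmost' until 'bottom', the first node that
# is a merge or whose first parent is null; p1 and p2 are bottom's parents.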
1116 def between(self, pairs):
1117 r = []
1118
1119 for top, bottom in pairs:
1120 n, l, i = top, [], 0
1121 f = 1
1122
1123 while n != bottom:
1124 p = self.changelog.parents(n)[0]
1125 if i == f:
1126 l.append(n)
1127 f = f * 2
1128 n = p
1129 i += 1
1130
1131 r.append(l)
1132
1133 return r
1134
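# Editor's sketch (not part of the original file): the i == f / f = f * 2
# test above records nodes at exponentially growing first-parent distances
# below 'top' (1, 2, 4, 8, ... steps). E.g. for a 10-deep chain from top
# to bottom, l would hold the nodes 1, 2, 4 and 8 steps below top, giving
# the caller logarithmically spaced samples of the range.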
1135 def findincoming(self, remote, base=None, heads=None, force=False):
1136 """Return list of roots of the subsets of missing nodes from remote
1137
1138 If base dict is specified, assume that these nodes and their parents
1139 exist on the remote side and that no child of a node of base exists
1140 in both remote and self.
1141 Furthermore, base will be updated to include the nodes that exist in
1142 both self and remote but have no children that exist in both.
1143 If a list of heads is specified, return only nodes which are heads
1144 or ancestors of these heads.
1145
1146 All the ancestors of base are in self and in remote.
1147 All the descendants of the list returned are missing in self.
1148 (and so we know that the rest of the nodes are missing in remote, see
1149 outgoing)
1150 """
1151 m = self.changelog.nodemap
1152 search = []
1153 fetch = {}
1154 seen = {}
1155 seenbranch = {}
1156 if base == None:
1157 base = {}
1158
1159 if not heads:
1160 heads = remote.heads()
1161
1162 if self.changelog.tip() == nullid:
1163 base[nullid] = 1
1164 if heads != [nullid]:
1165 return [nullid]
1166 return []
1167
1168 # assume we're closer to the tip than the root
1169 # and start by examining the heads
1170 self.ui.status(_("searching for changes\n"))
1171
1172 unknown = []
1173 for h in heads:
1174 if h not in m:
1175 unknown.append(h)
1176 else:
1177 base[h] = 1
1178
1179 if not unknown:
1180 return []
1181
1182 req = dict.fromkeys(unknown)
1183 reqcnt = 0
1184
1185 # search through remote branches
1186 # a 'branch' here is a linear segment of history, with four parts:
1187 # head, root, first parent, second parent
1188 # (a branch always has two parents (or none) by definition)
1189 unknown = remote.branches(unknown)
1190 while unknown:
1191 r = []
1192 while unknown:
1193 n = unknown.pop(0)
1194 if n[0] in seen:
1195 continue
1196
1197 self.ui.debug(_("examining %s:%s\n")
1198 % (short(n[0]), short(n[1])))
1199 if n[0] == nullid: # found the end of the branch
1200 pass
1201 elif n in seenbranch:
1202 self.ui.debug(_("branch already found\n"))
1203 continue
1204 elif n[1] and n[1] in m: # do we know the base?
1205 self.ui.debug(_("found incomplete branch %s:%s\n")
1206 % (short(n[0]), short(n[1])))
1207 search.append(n) # schedule branch range for scanning
1208 seenbranch[n] = 1
1209 else:
1210 if n[1] not in seen and n[1] not in fetch:
1211 if n[2] in m and n[3] in m:
1212 self.ui.debug(_("found new changeset %s\n") %
1213 short(n[1]))
1214 fetch[n[1]] = 1 # earliest unknown
1215 for p in n[2:4]:
1216 if p in m:
1217 base[p] = 1 # latest known
1218
1219 for p in n[2:4]:
1220 if p not in req and p not in m:
1221 r.append(p)
1222 req[p] = 1
1223 seen[n[0]] = 1
1224
1225 if r:
1226 reqcnt += 1
1227 self.ui.debug(_("request %d: %s\n") %
1228 (reqcnt, " ".join(map(short, r))))
1229 for p in xrange(0, len(r), 10):
1230 for b in remote.branches(r[p:p+10]):
1231 self.ui.debug(_("received %s:%s\n") %
1232 (short(b[0]), short(b[1])))
1233 unknown.append(b)
1234
1235 # do binary search on the branches we found
1236 while search:
1237 n = search.pop(0)
1238 reqcnt += 1
1239 l = remote.between([(n[0], n[1])])[0]
1240 l.append(n[1])
1241 p = n[0]
1242 f = 1
1243 for i in l:
1244 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1245 if i in m:
1246 if f <= 2:
1247 self.ui.debug(_("found new branch changeset %s\n") %
1248 short(p))
1249 fetch[p] = 1
1250 base[i] = 1
1251 else:
1252 self.ui.debug(_("narrowed branch search to %s:%s\n")
1253 % (short(p), short(i)))
1254 search.append((p, i))
1255 break
1256 p, f = i, f * 2
1257
1258 # sanity check our fetch list
1259 for f in fetch.keys():
1260 if f in m:
1261 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1262
1263 if base.keys() == [nullid]:
1264 if force:
1265 self.ui.warn(_("warning: repository is unrelated\n"))
1266 else:
1267 raise util.Abort(_("repository is unrelated"))
1268
1269 self.ui.debug(_("found new changesets starting at ") +
1270 " ".join([short(f) for f in fetch]) + "\n")
1271
1272 self.ui.debug(_("%d total queries\n") % reqcnt)
1273
1274 return fetch.keys()
1275
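# Editor's summary (not part of the original file) of the discovery loop
# above: (1) remote heads we already know go straight into base; (2) for
# unknown heads, remote.branches() is queried in batches of 10 and each
# linear segment is classified as fully unknown, partially known, or
# already seen; (3) partially known segments are narrowed with
# remote.between(), using the logarithmically spaced samples, until the
# oldest unknown node is found. The returned fetch.keys() are the roots
# of the missing subsets.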
1276 def findoutgoing(self, remote, base=None, heads=None, force=False):
1277 """Return list of nodes that are roots of subsets not in remote
1278
1279 If base dict is specified, assume that these nodes and their parents
1280 exist on the remote side.
1281 If a list of heads is specified, return only nodes which are heads
1282 or ancestors of these heads, and return a second element which
1283 contains all remote heads which get new children.
1284 """
1285 if base == None:
1286 base = {}
1287 self.findincoming(remote, base, heads, force=force)
1288
1289 self.ui.debug(_("common changesets up to ")
1290 + " ".join(map(short, base.keys())) + "\n")
1291
1292 remain = dict.fromkeys(self.changelog.nodemap)
1293
1294 # prune everything remote has from the tree
1295 del remain[nullid]
1296 remove = base.keys()
1297 while remove:
1298 n = remove.pop(0)
1299 if n in remain:
1300 del remain[n]
1301 for p in self.changelog.parents(n):
1302 remove.append(p)
1303
1304 # find every node whose parents have been pruned
1305 subset = []
1306 # find every remote head that will get new children
1307 updated_heads = {}
1308 for n in remain:
1309 p1, p2 = self.changelog.parents(n)
1310 if p1 not in remain and p2 not in remain:
1311 subset.append(n)
1312 if heads:
1313 if p1 in heads:
1314 updated_heads[p1] = True
1315 if p2 in heads:
1316 updated_heads[p2] = True
1317
1318 # this is the set of all roots we have to push
1319 if heads:
1320 return subset, updated_heads.keys()
1321 else:
1322 return subset
1323
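# Editor's sketch (not part of the original file): findincoming() fills
# 'base' with the common frontier; everything reachable from base is then
# pruned from 'remain', and the survivors whose parents were both pruned
# are the roots to push. Hedged usage:
#
#   roots = repo.findoutgoing(remote)                    # no heads given
#   roots, updated = repo.findoutgoing(remote, heads=remote.heads())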
1324 def pull(self, remote, heads=None, force=False, lock=None):
1325 mylock = False
1326 if not lock:
1327 lock = self.lock()
1328 mylock = True
1329
1330 try:
1331 fetch = self.findincoming(remote, force=force)
1332 if fetch == [nullid]:
1333 self.ui.status(_("requesting all changes\n"))
1334
1335 if not fetch:
1336 self.ui.status(_("no changes found\n"))
1337 return 0
1338
1339 if heads is None:
1340 cg = remote.changegroup(fetch, 'pull')
1341 else:
1342 if 'changegroupsubset' not in remote.capabilities:
1343 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1344 cg = remote.changegroupsubset(fetch, heads, 'pull')
1345 return self.addchangegroup(cg, 'pull', remote.url())
1346 finally:
1347 if mylock:
1348 lock.release()
1349
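# Hedged usage sketch (not part of the original file; 'remote' stands for
# any repository object exposing heads(), changegroup() and capabilities):
#
#   repo.pull(remote)                   # pull everything that is missing
#   repo.pull(remote, heads=[node])     # partial pull; aborts unless the
#                                       # remote supports changegroupsubset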
1350 def push(self, remote, force=False, revs=None):
1351 # there are two ways to push to remote repo:
1352 #
1353 # addchangegroup assumes local user can lock remote
1354 # repo (local filesystem, old ssh servers).
1355 #
1356 # unbundle assumes local user cannot lock remote repo (new ssh
1357 # servers, http servers).
1358
1359 if remote.capable('unbundle'):
1360 return self.push_unbundle(remote, force, revs)
1361 return self.push_addchangegroup(remote, force, revs)
1362
1363 def prepush(self, remote, force, revs):
1364 base = {}
1365 remote_heads = remote.heads()
1366 inc = self.findincoming(remote, base, remote_heads, force=force)
1367
1368 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1369 if revs is not None:
1370 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1371 else:
1372 bases, heads = update, self.changelog.heads()
1373
1374 if not bases:
1375 self.ui.status(_("no changes found\n"))
1376 return None, 1
1377 elif not force:
1378 # check if we're creating new remote heads
1379 # to be a remote head after push, node must be either
1380 # - unknown locally
1381 # - a local outgoing head descended from update
1382 # - a remote head that's known locally and not
1383 # ancestral to an outgoing head
1384
1385 warn = 0
1386
1387 if remote_heads == [nullid]:
1388 warn = 0
1389 elif not revs and len(heads) > len(remote_heads):
1390 warn = 1
1391 else:
1392 newheads = list(heads)
1393 for r in remote_heads:
1394 if r in self.changelog.nodemap:
1395 desc = self.changelog.heads(r, heads)
1396 l = [h for h in heads if h in desc]
1397 if not l:
1398 newheads.append(r)
1399 else:
1400 newheads.append(r)
1401 if len(newheads) > len(remote_heads):
1402 warn = 1
1403
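# Editor's illustration (not part of the original file) of this
# changeset's fix on the heads(r, heads) line above: suppose remote head r
# has local descendants r -> h -> x and we push -r h. With plain heads(r),
# only the true local head x is returned; h is then not in desc, r is
# wrongly kept in newheads, and the push aborts with "push creates new
# remote branches". Passing the pushed heads as the second argument makes
# the walk treat them as heads of the pushed subset, so h shows up in desc
# and r is correctly dropped as superseded.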
1404 if warn:
1405 self.ui.warn(_("abort: push creates new remote branches!\n"))
1406 self.ui.status(_("(did you forget to merge?"
1407 " use push -f to force)\n"))
1408 return None, 1
1409 elif inc:
1410 self.ui.warn(_("note: unsynced remote changes!\n"))
1411
1412
1413 if revs is None:
1414 cg = self.changegroup(update, 'push')
1415 else:
1416 cg = self.changegroupsubset(update, revs, 'push')
1417 return cg, remote_heads
1418
1419 def push_addchangegroup(self, remote, force, revs):
1420 lock = remote.lock()
1421
1422 ret = self.prepush(remote, force, revs)
1423 if ret[0] is not None:
1424 cg, remote_heads = ret
1425 return remote.addchangegroup(cg, 'push', self.url())
1426 return ret[1]
1427
1428 def push_unbundle(self, remote, force, revs):
1429 # local repo finds heads on server, finds out what revs it
1430 # must push. once revs transferred, if server finds it has
1431 # different heads (someone else won commit/push race), server
1432 # aborts.
1433
1434 ret = self.prepush(remote, force, revs)
1435 if ret[0] is not None:
1436 cg, remote_heads = ret
1437 if force: remote_heads = ['force']
1438 return remote.unbundle(cg, remote_heads, 'push')
1439 return ret[1]
1440
1441 def changegroupinfo(self, nodes):
1442 self.ui.note(_("%d changesets found\n") % len(nodes))
1443 if self.ui.debugflag:
1444 self.ui.debug(_("List of changesets:\n"))
1445 for node in nodes:
1446 self.ui.debug("%s\n" % hex(node))
1447
1448 def changegroupsubset(self, bases, heads, source):
1449 """This function generates a changegroup consisting of all the nodes
1450 that are descendants of any of the bases, and ancestors of any of
1451 the heads.
1452
1453 It is fairly complex as determining which filenodes and which
1454 manifest nodes need to be included for the changeset to be complete
1455 is non-trivial.
1456
1457 Another wrinkle is doing the reverse, figuring out which changeset in
1458 the changegroup a particular filenode or manifestnode belongs to."""
1459
1460 self.hook('preoutgoing', throw=True, source=source)
1461
1462 # Set up some initial variables
1463 # Make it easy to refer to self.changelog
1464 cl = self.changelog
1465 # msng is short for missing - compute the list of changesets in this
1466 # changegroup.
1467 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1468 self.changegroupinfo(msng_cl_lst)
1469 # Some bases may turn out to be superfluous, and some heads may be
1470 # too. nodesbetween will return the minimal set of bases and heads
1471 # necessary to re-create the changegroup.
1472
1473 # Known heads are the list of heads that it is assumed the recipient
1474 # of this changegroup will know about.
1475 knownheads = {}
1476 # We assume that all parents of bases are known heads.
1477 for n in bases:
1478 for p in cl.parents(n):
1479 if p != nullid:
1480 knownheads[p] = 1
1481 knownheads = knownheads.keys()
1482 if knownheads:
1483 # Now that we know what heads are known, we can compute which
1484 # changesets are known. The recipient must know about all
1485 # changesets required to reach the known heads from the null
1486 # changeset.
1487 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1488 junk = None
1489 # Transform the list into an ersatz set.
1490 has_cl_set = dict.fromkeys(has_cl_set)
1491 else:
1492 # If there were no known heads, the recipient cannot be assumed to
1493 # know about any changesets.
1494 has_cl_set = {}
1495
1496 # Make it easy to refer to self.manifest
1497 mnfst = self.manifest
1498 # We don't know which manifests are missing yet
1499 msng_mnfst_set = {}
1500 # Nor do we know which filenodes are missing.
1501 msng_filenode_set = {}
1502
1503 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1504 junk = None
1505
1506 # A changeset always belongs to itself, so the changenode lookup
1507 # function for a changenode is identity.
1508 def identity(x):
1509 return x
1510
1511 # A function generating function. Sets up an environment for the
1512 # inner function.
1513 def cmp_by_rev_func(revlog):
1514 # Compare two nodes by their revision number in the environment's
1515 # revision history. Since the revision number both represents the
1516 # most efficient order to read the nodes in, and represents a
1517 # topological sorting of the nodes, this function is often useful.
1518 def cmp_by_rev(a, b):
1519 return cmp(revlog.rev(a), revlog.rev(b))
1520 return cmp_by_rev
1521
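# Editor's sketch (not part of the original file): sorting with the
# comparator factory above puts nodes into revision order, which is both
# the on-disk read order and a topological order, e.g.:
#
#   nodes.sort(cmp_by_rev_func(mnfst))   # same idiom used further below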
1522 # If we determine that a particular file or manifest node must be a
1523 # node that the recipient of the changegroup will already have, we can
1524 # also assume the recipient will have all the parents. This function
1525 # prunes them from the set of missing nodes.
1526 def prune_parents(revlog, hasset, msngset):
1527 haslst = hasset.keys()
1528 haslst.sort(cmp_by_rev_func(revlog))
1529 for node in haslst:
1530 parentlst = [p for p in revlog.parents(node) if p != nullid]
1531 while parentlst:
1532 n = parentlst.pop()
1533 if n not in hasset:
1534 hasset[n] = 1
1535 p = [p for p in revlog.parents(n) if p != nullid]
1536 parentlst.extend(p)
1537 for n in hasset:
1538 msngset.pop(n, None)
1539
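# Editor's example (not part of the original file): if hasset starts as
# {n: 1} and n's ancestry is root -> p -> n, prune_parents() walks n's
# parents, adds p and root to hasset, and then pops all three from
# msngset: a recipient known to have n must have n's entire ancestry.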
1540 # This is a function generating function used to set up an environment
1541 # for the inner function to execute in.
1542 def manifest_and_file_collector(changedfileset):
1543 # This is an information gathering function that gathers
1544 # information from each changeset node that goes out as part of
1545 # the changegroup. The information gathered is a list of which
1546 # manifest nodes are potentially required (the recipient may
1547 # already have them) and a total list of all files which were
1548 # changed in any changeset in the changegroup.
1549 #
1550 # We also remember the first changenode each manifest was
1551 # referenced by, so we can later determine which changenode
1552 # 'owns' the manifest.
1553 def collect_manifests_and_files(clnode):
1554 c = cl.read(clnode)
1555 for f in c[3]:
1556 # This is to make sure we only have one instance of each
1557 # filename string for each filename.
1558 changedfileset.setdefault(f, f)
1559 msng_mnfst_set.setdefault(c[0], clnode)
1560 return collect_manifests_and_files
1561
1562 # Figure out which manifest nodes (of the ones we think might be part
1563 # of the changegroup) the recipient must know about and remove them
1564 # from the changegroup.
1565 def prune_manifests():
1566 has_mnfst_set = {}
1567 for n in msng_mnfst_set:
1568 # If a 'missing' manifest thinks it belongs to a changenode
1569 # the recipient is assumed to have, obviously the recipient
1570 # must have that manifest.
1571 linknode = cl.node(mnfst.linkrev(n))
1572 if linknode in has_cl_set:
1573 has_mnfst_set[n] = 1
1574 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1575
1576 # Use the information collected in collect_manifests_and_files to say
1577 # which changenode any manifestnode belongs to.
1578 def lookup_manifest_link(mnfstnode):
1579 return msng_mnfst_set[mnfstnode]
1580
1581 # A function generating function that sets up the initial environment
1582 # for the inner function.
1583 def filenode_collector(changedfiles):
1584 next_rev = [0]
1585 # This gathers information from each manifestnode included in the
1586 # changegroup about which filenodes the manifest node references
1587 # so we can include those in the changegroup too.
1588 #
1589 # It also remembers which changenode each filenode belongs to. It
1590 # does this by assuming that a filenode belongs to the changenode
1591 # that the first manifest referencing it belongs to.
1592 def collect_msng_filenodes(mnfstnode):
1593 r = mnfst.rev(mnfstnode)
1594 if r == next_rev[0]:
1595 # If the last rev we looked at was the one just previous,
1596 # we only need to see a diff.
1597 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1598 # For each line in the delta
1599 for dline in delta.splitlines():
1600 # get the filename and filenode for that line
1601 f, fnode = dline.split('\0')
1602 fnode = bin(fnode[:40])
1603 f = changedfiles.get(f, None)
1604 # And if the file is in the list of files we care
1605 # about.
1606 if f is not None:
1607 # Get the changenode this manifest belongs to
1608 clnode = msng_mnfst_set[mnfstnode]
1609 # Create the set of filenodes for the file if
1610 # there isn't one already.
1611 ndset = msng_filenode_set.setdefault(f, {})
1612 # And set the filenode's changelog node to the
1613 # manifest's if it hasn't been set already.
1614 ndset.setdefault(fnode, clnode)
1615 else:
1616 # Otherwise we need a full manifest.
1617 m = mnfst.read(mnfstnode)
1618 # For every file we care about.
1619 for f in changedfiles:
1620 fnode = m.get(f, None)
1621 # If it's in the manifest
1622 if fnode is not None:
1623 # See comments above.
1624 clnode = msng_mnfst_set[mnfstnode]
1625 ndset = msng_filenode_set.setdefault(f, {})
1626 ndset.setdefault(fnode, clnode)
1627 # Remember the revision we hope to see next.
1628 next_rev[0] = r + 1
1629 return collect_msng_filenodes
1630
1631 # We have a list of filenodes we think we need for a file; let's remove
1632 # all those we know the recipient must have.
1633 def prune_filenodes(f, filerevlog):
1634 msngset = msng_filenode_set[f]
1635 hasset = {}
1636 # If a 'missing' filenode thinks it belongs to a changenode we
1637 # assume the recipient must have, then the recipient must have
1638 # that filenode.
1639 for n in msngset:
1640 clnode = cl.node(filerevlog.linkrev(n))
1641 if clnode in has_cl_set:
1642 hasset[n] = 1
1643 prune_parents(filerevlog, hasset, msngset)
1644
1645 # A function generating function that sets up a context for the
1646 # inner function.
1647 def lookup_filenode_link_func(fname):
1648 msngset = msng_filenode_set[fname]
1649 # Lookup the changenode the filenode belongs to.
1650 def lookup_filenode_link(fnode):
1651 return msngset[fnode]
1652 return lookup_filenode_link
1653
1654 # Now that we have all these utility functions to help out and
1655 # logically divide up the task, generate the group.
1656 def gengroup():
1657 # The set of changed files starts empty.
1658 changedfiles = {}
1659 # Create a changenode group generator that will call our functions
1660 # back to lookup the owning changenode and collect information.
1661 group = cl.group(msng_cl_lst, identity,
1662 manifest_and_file_collector(changedfiles))
1663 for chnk in group:
1664 yield chnk
1665
1666 # The list of manifests has been collected by the generator
1667 # calling our functions back.
1668 prune_manifests()
1669 msng_mnfst_lst = msng_mnfst_set.keys()
1670 # Sort the manifestnodes by revision number.
1671 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1672 # Create a generator for the manifestnodes that calls our lookup
1673 # and data collection functions back.
1674 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1675 filenode_collector(changedfiles))
1676 for chnk in group:
1677 yield chnk
1678
1679 # These are no longer needed, dereference and toss the memory for
1680 # them.
1681 msng_mnfst_lst = None
1682 msng_mnfst_set.clear()
1683
1684 changedfiles = changedfiles.keys()
1685 changedfiles.sort()
1686 # Go through all our files in order sorted by name.
1687 for fname in changedfiles:
1688 filerevlog = self.file(fname)
1689 # Toss out the filenodes that the recipient isn't really
1690 # missing.
1691 if msng_filenode_set.has_key(fname):
1692 prune_filenodes(fname, filerevlog)
1693 msng_filenode_lst = msng_filenode_set[fname].keys()
1694 else:
1695 msng_filenode_lst = []
1696 # If any filenodes are left, generate the group for them,
1697 # otherwise don't bother.
1698 if len(msng_filenode_lst) > 0:
1699 yield changegroup.genchunk(fname)
1700 # Sort the filenodes by their revision #
1701 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1702 # Create a group generator and only pass in a changenode
1703 # lookup function as we need to collect no information
1704 # from filenodes.
1705 group = filerevlog.group(msng_filenode_lst,
1706 lookup_filenode_link_func(fname))
1707 for chnk in group:
1708 yield chnk
1709 if msng_filenode_set.has_key(fname):
1710 # Don't need this anymore, toss it to free memory.
1711 del msng_filenode_set[fname]
1712 # Signal that no more groups are left.
1713 yield changegroup.closechunk()
1714
1715 if msng_cl_lst:
1716 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1717
1718 return util.chunkbuffer(gengroup())
1719
1720 def changegroup(self, basenodes, source):
1721 """Generate a changegroup of all nodes that we have that a recipient
1722 doesn't.
1723
1724 This is much easier than the previous function as we can assume that
1725 the recipient has any changenode we aren't sending them."""
1726
1727 self.hook('preoutgoing', throw=True, source=source)
1728
1729 cl = self.changelog
1730 nodes = cl.nodesbetween(basenodes, None)[0]
1731 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1732 self.changegroupinfo(nodes)
1733
1734 def identity(x):
1735 return x
1736
1737 def gennodelst(revlog):
1738 for r in xrange(0, revlog.count()):
1739 n = revlog.node(r)
1740 if revlog.linkrev(n) in revset:
1741 yield n
1742
1743 def changed_file_collector(changedfileset):
1744 def collect_changed_files(clnode):
1745 c = cl.read(clnode)
1746 for fname in c[3]:
1747 changedfileset[fname] = 1
1748 return collect_changed_files
1749
1750 def lookuprevlink_func(revlog):
1751 def lookuprevlink(n):
1752 return cl.node(revlog.linkrev(n))
1753 return lookuprevlink
1754
1755 def gengroup():
1756 # construct a list of all changed files
1757 changedfiles = {}
1758
1759 for chnk in cl.group(nodes, identity,
1760 changed_file_collector(changedfiles)):
1761 yield chnk
1762 changedfiles = changedfiles.keys()
1763 changedfiles.sort()
1764
1765 mnfst = self.manifest
1766 nodeiter = gennodelst(mnfst)
1767 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1768 yield chnk
1769
1770 for fname in changedfiles:
1771 filerevlog = self.file(fname)
1772 nodeiter = gennodelst(filerevlog)
1773 nodeiter = list(nodeiter)
1774 if nodeiter:
1775 yield changegroup.genchunk(fname)
1776 lookup = lookuprevlink_func(filerevlog)
1777 for chnk in filerevlog.group(nodeiter, lookup):
1778 yield chnk
1779
1780 yield changegroup.closechunk()
1781
1782 if nodes:
1783 self.hook('outgoing', node=hex(nodes[0]), source=source)
1784
1785 return util.chunkbuffer(gengroup())
1786
1787 def addchangegroup(self, source, srctype, url):
1788 """add changegroup to repo.
1789
1790 return values:
1791 - nothing changed or no source: 0
1792 - more heads than before: 1+added heads (2..n)
1793 - fewer heads than before: -1-removed heads (-2..-n)
1794 - number of heads stays the same: 1
1795 """
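# Editor's examples (not part of the original file) of the return values
# documented above: an empty source returns 0; adding changesets without
# changing the head count returns 1; growing 1 head into 3 returns
# 1 + 2 = 3; merging 3 heads down to 1 returns -1 - 2 = -3.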
1796 def csmap(x):
1797 self.ui.debug(_("add changeset %s\n") % short(x))
1798 return cl.count()
1799
1800 def revmap(x):
1801 return cl.rev(x)
1802
1803 if not source:
1804 return 0
1805
1806 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1807
1808 changesets = files = revisions = 0
1809
1810 tr = self.transaction()
1811
1812 # write changelog data to temp files so concurrent readers will not see
1813 # inconsistent view
1814 cl = None
1815 try:
1816 cl = appendfile.appendchangelog(self.sopener,
1817 self.changelog.version)
1818
1819 oldheads = len(cl.heads())
1820
1821 # pull off the changeset group
1822 self.ui.status(_("adding changesets\n"))
1823 cor = cl.count() - 1
1824 chunkiter = changegroup.chunkiter(source)
1825 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1826 raise util.Abort(_("received changelog group is empty"))
1827 cnr = cl.count() - 1
1828 changesets = cnr - cor
1829
1830 # pull off the manifest group
1831 self.ui.status(_("adding manifests\n"))
1832 chunkiter = changegroup.chunkiter(source)
1833 # no need to check for empty manifest group here:
1834 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1835 # no new manifest will be created and the manifest group will
1836 # be empty during the pull
1837 self.manifest.addgroup(chunkiter, revmap, tr)
1838
1839 # process the files
1840 self.ui.status(_("adding file changes\n"))
1841 while 1:
1842 f = changegroup.getchunk(source)
1843 if not f:
1844 break
1845 self.ui.debug(_("adding %s revisions\n") % f)
1846 fl = self.file(f)
1847 o = fl.count()
1848 chunkiter = changegroup.chunkiter(source)
1849 if fl.addgroup(chunkiter, revmap, tr) is None:
1850 raise util.Abort(_("received file revlog group is empty"))
1851 revisions += fl.count() - o
1852 files += 1
1853
1854 cl.writedata()
1855 finally:
1856 if cl:
1857 cl.cleanup()
1858
1859 # make changelog see real files again
1860 self.changelog = changelog.changelog(self.sopener,
1861 self.changelog.version)
1862 self.changelog.checkinlinesize(tr)
1863
1864 newheads = len(self.changelog.heads())
1865 heads = ""
1866 if oldheads and newheads != oldheads:
1867 heads = _(" (%+d heads)") % (newheads - oldheads)
1868
1869 self.ui.status(_("added %d changesets"
1870 " with %d changes to %d files%s\n")
1871 % (changesets, revisions, files, heads))
1872
1873 if changesets > 0:
1874 self.hook('pretxnchangegroup', throw=True,
1875 node=hex(self.changelog.node(cor+1)), source=srctype,
1876 url=url)
1877
1878 tr.close()
1879
1880 if changesets > 0:
1881 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1882 source=srctype, url=url)
1883
1884 for i in xrange(cor + 1, cnr + 1):
1885 self.hook("incoming", node=hex(self.changelog.node(i)),
1885 self.hook("incoming", node=hex(self.changelog.node(i)),
1886 source=srctype, url=url)
1886 source=srctype, url=url)
1887
1887
1888 # never return 0 here:
1888 # never return 0 here:
1889 if newheads < oldheads:
1889 if newheads < oldheads:
1890 return newheads - oldheads - 1
1890 return newheads - oldheads - 1
1891 else:
1891 else:
1892 return newheads - oldheads + 1
1892 return newheads - oldheads + 1
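A small illustrative decoder for the return-value convention documented in the docstring above; interpret_result is a hypothetical helper, not part of Mercurial:

def interpret_result(ret):
    # 0: nothing changed; 1: head count unchanged; 2..n: ret - 1 heads
    # added; -2..-n: -ret - 1 heads removed (the method never returns -1)
    if ret == 0:
        return "nothing changed"
    if ret > 0:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)

assert interpret_result(1) == "0 head(s) added"
assert interpret_result(3) == "2 head(s) added"
assert interpret_result(-3) == "2 head(s) removed"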
1893 
1894 
1895     def stream_in(self, remote):
1896         fp = remote.stream_out()
1897         l = fp.readline()
1898         try:
1899             resp = int(l)
1900         except ValueError:
1901             raise util.UnexpectedOutput(
1902                 _('Unexpected response from remote server:'), l)
1903         if resp == 1:
1904             raise util.Abort(_('operation forbidden by server'))
1905         elif resp == 2:
1906             raise util.Abort(_('locking the remote repository failed'))
1907         elif resp != 0:
1908             raise util.Abort(_('the server sent an unknown error code'))
1909         self.ui.status(_('streaming all changes\n'))
1910         l = fp.readline()
1911         try:
1912             total_files, total_bytes = map(int, l.split(' ', 1))
1913         except (ValueError, TypeError):
1914             raise util.UnexpectedOutput(
1915                 _('Unexpected response from remote server:'), l)
1916         self.ui.status(_('%d files to transfer, %s of data\n') %
1917                        (total_files, util.bytecount(total_bytes)))
1918         start = time.time()
1919         for i in xrange(total_files):
1920             # XXX doesn't support '\n' or '\r' in filenames
1921             l = fp.readline()
1922             try:
1923                 name, size = l.split('\0', 1)
1924                 size = int(size)
1925             except (ValueError, TypeError):
1926                 raise util.UnexpectedOutput(
1927                     _('Unexpected response from remote server:'), l)
1928             self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1929             ofp = self.sopener(name, 'w')
1930             for chunk in util.filechunkiter(fp, limit=size):
1931                 ofp.write(chunk)
1932             ofp.close()
1933         elapsed = time.time() - start
1934         self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1935                        (util.bytecount(total_bytes), elapsed,
1936                         util.bytecount(total_bytes / elapsed)))
1937         self.reload()
1938         return len(self.heads()) + 1
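Judging purely from the parsing above, the stream_out wire format is: a status line, a "<file count> <byte count>" line, then for each file a "<name>\0<size>" header followed by exactly <size> raw bytes. A hypothetical producer-side sketch, for illustration only:

def encode_stream(entries):
    """entries: list of (filename, data) pairs"""
    yield '0\n'  # status: 0 == OK, 1 == forbidden, 2 == remote lock failed
    total_bytes = sum(len(data) for name, data in entries)
    yield '%d %d\n' % (len(entries), total_bytes)
    for name, data in entries:
        yield '%s\0%d\n' % (name, len(data))  # per-file header
        yield data                            # exactly len(data) bytes follow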
1939 
1940     def clone(self, remote, heads=[], stream=False):
1941         '''clone remote repository.
1942 
1943         keyword arguments:
1944         heads: list of revs to clone (forces use of pull)
1945         stream: use streaming clone if possible'''
1946 
1947         # now, all clients that can request uncompressed clones can
1948         # read repo formats supported by all servers that can serve
1949         # them.
1950 
1951         # if revlog format changes, client will have to check version
1952         # and format flags on "stream" capability, and use
1953         # uncompressed only if compatible.
1954 
1955         if stream and not heads and remote.capable('stream'):
1956             return self.stream_in(remote)
1957         return self.pull(remote, heads)
1958 
1959 # used to avoid circular references so destructors work
1960 def aftertrans(files):
1961     renamefiles = [tuple(t) for t in files]
1962     def a():
1963         for src, dest in renamefiles:
1964             util.rename(src, dest)
1965     return a
1966 
1967 def instance(ui, path, create):
1968     return localrepository(ui, util.drop_scheme('file', path), create)
1969 
1970 def islocal(path):
1971     return True
@@ -1,1286 +1,1292 @@
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno ancestor mdiff os")
16 demandload(globals(), "binascii changegroup errno ancestor mdiff os")
17 demandload(globals(), "sha struct util zlib")
17 demandload(globals(), "sha struct util zlib")
18
18
19 # revlog version strings
19 # revlog version strings
20 REVLOGV0 = 0
20 REVLOGV0 = 0
21 REVLOGNG = 1
21 REVLOGNG = 1
22
22
23 # revlog flags
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOGNGINLINEDATA = (1 << 16)
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
26
26
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
29
29
30 def flagstr(flag):
30 def flagstr(flag):
31 if flag == "inline":
31 if flag == "inline":
32 return REVLOGNGINLINEDATA
32 return REVLOGNGINLINEDATA
33 raise RevlogError(_("unknown revlog flag %s") % flag)
33 raise RevlogError(_("unknown revlog flag %s") % flag)
34
34
35 def hash(text, p1, p2):
35 def hash(text, p1, p2):
36 """generate a hash from the given text and its parent hashes
36 """generate a hash from the given text and its parent hashes
37
37
38 This hash combines both the current file contents and its history
38 This hash combines both the current file contents and its history
39 in a manner that makes it easy to distinguish nodes with the same
39 in a manner that makes it easy to distinguish nodes with the same
40 content in the revision graph.
40 content in the revision graph.
41 """
41 """
42 l = [p1, p2]
42 l = [p1, p2]
43 l.sort()
43 l.sort()
44 s = sha.new(l[0])
44 s = sha.new(l[0])
45 s.update(l[1])
45 s.update(l[1])
46 s.update(text)
46 s.update(text)
47 return s.digest()
47 return s.digest()
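A self-contained sketch of the same nodeid computation, substituting hashlib for the now-deprecated sha module; sorting the parents first makes the result independent of their order:

import hashlib

def nodeid(text, p1, p2):
    l = [p1, p2]
    l.sort()
    s = hashlib.sha1(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

nullid = "\0" * 20
assert nodeid("data", nullid, "\xff" * 20) == nodeid("data", "\xff" * 20, nullid)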
48 
49 def compress(text):
50     """ generate a possibly-compressed representation of text """
51     if not text: return ("", text)
52     if len(text) < 44:
53         if text[0] == '\0': return ("", text)
54         return ('u', text)
55     bin = zlib.compress(text)
56     if len(bin) > len(text):
57         if text[0] == '\0': return ("", text)
58         return ('u', text)
59     return ("", bin)
60 
61 def decompress(bin):
62     """ decompress the given input """
63     if not bin: return bin
64     t = bin[0]
65     if t == '\0': return bin
66     if t == 'x': return zlib.decompress(bin)
67     if t == 'u': return bin[1:]
68     raise RevlogError(_("unknown compression type %r") % t)
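decompress() dispatches on the first byte: '\0' means the data was stored raw, 'u' marks text stored uncompressed, and 'x' arises naturally as the first byte of a zlib stream. A round trip through the two functions above:

for text in ("", "short text", "\0binary-ish", "x" * 1000):
    tag, data = compress(text)
    assert decompress(tag + data) == text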
69 
70 indexformatv0 = ">4l20s20s20s"
71 v0shaoffset = 56
72 # index ng:
73 #  6 bytes: offset
74 #  2 bytes: flags
75 #  4 bytes: compressed length
76 #  4 bytes: uncompressed length
77 #  4 bytes: base rev
78 #  4 bytes: link rev
79 #  4 bytes: parent 1 rev
80 #  4 bytes: parent 2 rev
81 # 32 bytes: nodeid (20 significant bytes, zero-padded to 32)
82 indexformatng = ">Qiiiiii20s12x"
83 ngshaoffset = 32
84 versionformat = ">I"
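As a sanity check on the layout comment above: the ng record is a fixed 64 bytes and unpacks into eight fields, with the 12 pad bytes skipped (illustrative only):

import struct

indexformatng = ">Qiiiiii20s12x"
assert struct.calcsize(indexformatng) == 64
offset_flags, c_len, u_len, base, link, p1, p2, node = \
    struct.unpack(indexformatng, "\x00" * 64)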
85 
86 class lazyparser(object):
87     """
88     this class avoids the need to parse the entirety of large indices
89     """
90 
91     # lazyparser is not safe to use on windows if win32 extensions not
92     # available. it keeps the file handle open, which makes it impossible
93     # to break hardlinks on local cloned repos.
94     safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
95                                       hasattr(util, 'win32api'))
96 
97     def __init__(self, dataf, size, indexformat, shaoffset):
98         self.dataf = dataf
99         self.format = indexformat
100         self.s = struct.calcsize(indexformat)
101         self.indexformat = indexformat
102         self.datasize = size
103         self.l = size/self.s
104         self.index = [None] * self.l
105         self.map = {nullid: nullrev}
106         self.allmap = 0
107         self.all = 0
108         self.mapfind_count = 0
109         self.shaoffset = shaoffset
110 
111     def loadmap(self):
112         """
113         during a commit, we need to make sure the rev being added is
114         not a duplicate. This requires loading the entire index,
115         which is fairly slow. loadmap can load up just the node map,
116         which takes much less time.
117         """
118         if self.allmap: return
119         end = self.datasize
120         self.allmap = 1
121         cur = 0
122         count = 0
123         blocksize = self.s * 256
124         self.dataf.seek(0)
125         while cur < end:
126             data = self.dataf.read(blocksize)
127             off = 0
128             for x in xrange(256):
129                 n = data[off + self.shaoffset:off + self.shaoffset + 20]
130                 self.map[n] = count
131                 count += 1
132                 if count >= self.l:
133                     break
134                 off += self.s
135             cur += blocksize
136 
137     def loadblock(self, blockstart, blocksize, data=None):
138         if self.all: return
139         if data is None:
140             self.dataf.seek(blockstart)
141             if blockstart + blocksize > self.datasize:
142                 # the revlog may have grown since we started running,
143                 # but we don't have space in self.index for more entries.
144                 # limit blocksize so that we don't get too much data.
145                 blocksize = max(self.datasize - blockstart, 0)
146             data = self.dataf.read(blocksize)
147         lend = len(data) / self.s
148         i = blockstart / self.s
149         off = 0
150         for x in xrange(lend):
151             if self.index[i + x] == None:
152                 b = data[off : off + self.s]
153                 self.index[i + x] = b
154                 n = b[self.shaoffset:self.shaoffset + 20]
155                 self.map[n] = i + x
156             off += self.s
157 
158     def findnode(self, node):
159         """search backwards through the index file for a specific node"""
160         if self.allmap: return None
161 
162         # hg log will cause many, many searches for the manifest
163         # nodes. After we get called a few times, just load the whole
164         # thing.
165         if self.mapfind_count > 8:
166             self.loadmap()
167             if node in self.map:
168                 return node
169             return None
170         self.mapfind_count += 1
171         last = self.l - 1
172         while self.index[last] != None:
173             if last == 0:
174                 self.all = 1
175                 self.allmap = 1
176                 return None
177             last -= 1
178         end = (last + 1) * self.s
179         blocksize = self.s * 256
180         while end >= 0:
181             start = max(end - blocksize, 0)
182             self.dataf.seek(start)
183             data = self.dataf.read(end - start)
184             findend = end - start
185             while True:
186                 # we're searching backwards, so we have to make sure
187                 # we don't find a changeset where this node is a parent
188                 off = data.rfind(node, 0, findend)
189                 findend = off
190                 if off >= 0:
191                     i = off / self.s
192                     off = i * self.s
193                     n = data[off + self.shaoffset:off + self.shaoffset + 20]
194                     if n == node:
195                         self.map[n] = i + start / self.s
196                         return node
197                 else:
198                     break
199             end -= blocksize
200         return None
201 
202     def loadindex(self, i=None, end=None):
203         if self.all: return
204         all = False
205         if i == None:
206             blockstart = 0
207             blocksize = (512 / self.s) * self.s
208             end = self.datasize
209             all = True
210         else:
211             if end:
212                 blockstart = i * self.s
213                 end = end * self.s
214                 blocksize = end - blockstart
215             else:
216                 blockstart = (i & ~(32)) * self.s
217                 blocksize = self.s * 64
218                 end = blockstart + blocksize
219         while blockstart < end:
220             self.loadblock(blockstart, blocksize)
221             blockstart += blocksize
222         if all: self.all = True
223 
224 class lazyindex(object):
225     """a lazy version of the index array"""
226     def __init__(self, parser):
227         self.p = parser
228     def __len__(self):
229         return len(self.p.index)
230     def load(self, pos):
231         if pos < 0:
232             pos += len(self.p.index)
233         self.p.loadindex(pos)
234         return self.p.index[pos]
235     def __getitem__(self, pos):
236         ret = self.p.index[pos] or self.load(pos)
237         if isinstance(ret, str):
238             ret = struct.unpack(self.p.indexformat, ret)
239         return ret
240     def __setitem__(self, pos, item):
241         self.p.index[pos] = item
242     def __delitem__(self, pos):
243         del self.p.index[pos]
244     def append(self, e):
245         self.p.index.append(e)
246 
247 class lazymap(object):
248     """a lazy version of the node map"""
249     def __init__(self, parser):
250         self.p = parser
251     def load(self, key):
252         n = self.p.findnode(key)
253         if n == None:
254             raise KeyError(key)
255     def __contains__(self, key):
256         if key in self.p.map:
257             return True
258         self.p.loadmap()
259         return key in self.p.map
260     def __iter__(self):
261         yield nullid
262         for i in xrange(self.p.l):
263             ret = self.p.index[i]
264             if not ret:
265                 self.p.loadindex(i)
266                 ret = self.p.index[i]
267             if isinstance(ret, str):
268                 ret = struct.unpack(self.p.indexformat, ret)
269             yield ret[-1]
270     def __getitem__(self, key):
271         try:
272             return self.p.map[key]
273         except KeyError:
274             try:
275                 self.load(key)
276                 return self.p.map[key]
277             except KeyError:
278                 raise KeyError("node " + hex(key))
279     def __setitem__(self, key, val):
280         self.p.map[key] = val
281     def __delitem__(self, key):
282         del self.p.map[key]
283 
284 class RevlogError(Exception): pass
285 
286 class revlog(object):
287     """
288     the underlying revision storage object
289 
290     A revlog consists of two parts, an index and the revision data.
291 
292     The index is a file with a fixed record size containing
293     information on each revision, including its nodeid (hash), the
294     nodeids of its parents, the position and offset of its data within
295     the data file, and the revision it's based on. Finally, each entry
296     contains a linkrev entry that can serve as a pointer to external
297     data.
298 
299     The revision data itself is a linear collection of data chunks.
300     Each chunk represents a revision and is usually represented as a
301     delta against the previous chunk. To bound lookup time, runs of
302     deltas are limited to about 2 times the length of the original
303     version data. This makes retrieval of a version proportional to
304     its size, or O(1) relative to the number of revisions.
305 
306     Both pieces of the revlog are written to in an append-only
307     fashion, which means we never need to rewrite a file to insert or
308     remove data, and can use some simple techniques to avoid the need
309     for locking while reading.
310     """
311     def __init__(self, opener, indexfile, datafile,
312                  defversion=REVLOG_DEFAULT_VERSION):
313         """
314         create a revlog object
315 
316         opener is a function that abstracts the file opening operation
317         and can be used to implement COW semantics or the like.
318         """
319         self.indexfile = indexfile
320         self.datafile = datafile
321         self.opener = opener
322 
323         self.indexstat = None
324         self.cache = None
325         self.chunkcache = None
326         self.defversion = defversion
327         self.load()
328 
329     def load(self):
330         v = self.defversion
331         try:
332             f = self.opener(self.indexfile)
333             i = f.read(4)
334             f.seek(0)
335         except IOError, inst:
336             if inst.errno != errno.ENOENT:
337                 raise
338             i = ""
339         else:
340             try:
341                 st = util.fstat(f)
342             except AttributeError, inst:
343                 st = None
344             else:
345                 oldst = self.indexstat
346                 if (oldst and st.st_dev == oldst.st_dev
347                     and st.st_ino == oldst.st_ino
348                     and st.st_mtime == oldst.st_mtime
349                     and st.st_ctime == oldst.st_ctime):
350                     return
351                 self.indexstat = st
352             if len(i) > 0:
353                 v = struct.unpack(versionformat, i)[0]
354         flags = v & ~0xFFFF
355         fmt = v & 0xFFFF
356         if fmt == REVLOGV0:
357             if flags:
358                 raise RevlogError(_("index %s unknown flags %#04x for format v0")
359                                   % (self.indexfile, flags >> 16))
360         elif fmt == REVLOGNG:
361             if flags & ~REVLOGNGINLINEDATA:
362                 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
363                                   % (self.indexfile, flags >> 16))
364         else:
365             raise RevlogError(_("index %s unknown format %d")
366                               % (self.indexfile, fmt))
367         self.version = v
368         if v == REVLOGV0:
369             self.indexformat = indexformatv0
370             shaoffset = v0shaoffset
371         else:
372             self.indexformat = indexformatng
373             shaoffset = ngshaoffset
374 
375         if i:
376             if (lazyparser.safe_to_use and not self.inlinedata() and
377                 st and st.st_size > 10000):
378                 # big index, let's parse it on demand
379                 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
380                 self.index = lazyindex(parser)
381                 self.nodemap = lazymap(parser)
382             else:
383                 self.parseindex(f, st)
384             if self.version != REVLOGV0:
385                 e = list(self.index[0])
386                 type = self.ngtype(e[0])
387                 e[0] = self.offset_type(0, type)
388                 self.index[0] = e
389         else:
390             self.nodemap = {nullid: nullrev}
391             self.index = []
392 
393 
394     def parseindex(self, fp, st):
395         s = struct.calcsize(self.indexformat)
396         self.index = []
397         self.nodemap = {nullid: nullrev}
398         inline = self.inlinedata()
399         n = 0
400         leftover = None
401         while True:
402             if st:
403                 data = fp.read(65536)
404             else:
405                 # hack for httprangereader, it doesn't do partial reads well
406                 data = fp.read()
407             if not data:
408                 break
409             if n == 0 and self.inlinedata():
410                 # cache the first chunk
411                 self.chunkcache = (0, data)
412             if leftover:
413                 data = leftover + data
414                 leftover = None
415             off = 0
416             l = len(data)
417             while off < l:
418                 if l - off < s:
419                     leftover = data[off:]
420                     break
421                 cur = data[off:off + s]
422                 off += s
423                 e = struct.unpack(self.indexformat, cur)
424                 self.index.append(e)
425                 self.nodemap[e[-1]] = n
426                 n += 1
427                 if inline:
428                     off += e[1]
429                     if off > l:
430                         # some things don't seek well, just read it
431                         fp.read(off - l)
432             if not st:
433                 break
434 
435 
436     def ngoffset(self, q):
437         if q & 0xFFFF:
438             raise RevlogError(_('%s: incompatible revision flag %x') %
439                               (self.indexfile, q))
440         return long(q >> 16)
441 
442     def ngtype(self, q):
443         return int(q & 0xFFFF)
444 
445     def offset_type(self, offset, type):
446         return long(long(offset) << 16 | type)
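The three helpers above pack a 48-bit offset and a 16-bit type into a single index field. A plain-function round trip (illustrative, mirroring the methods):

def offset_type(offset, type):
    return (long(offset) << 16) | type

q = offset_type(1024, 1)
assert q >> 16 == 1024     # what ngoffset() recovers
assert q & 0xFFFF == 1     # what ngtype() recovers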
447 
448     def loadindex(self, start, end):
449         """load a block of indexes all at once from the lazy parser"""
450         if isinstance(self.index, lazyindex):
451             self.index.p.loadindex(start, end)
452 
453     def loadindexmap(self):
454         """loads both the map and the index from the lazy parser"""
455         if isinstance(self.index, lazyindex):
456             p = self.index.p
457             p.loadindex()
458             self.nodemap = p.map
459 
460     def loadmap(self):
461         """loads the map from the lazy parser"""
462         if isinstance(self.nodemap, lazymap):
463             self.nodemap.p.loadmap()
464             self.nodemap = self.nodemap.p.map
465 
466     def inlinedata(self): return self.version & REVLOGNGINLINEDATA
467     def tip(self): return self.node(len(self.index) - 1)
468     def count(self): return len(self.index)
469     def node(self, rev):
470         return rev == nullrev and nullid or self.index[rev][-1]
471     def rev(self, node):
472         try:
473             return self.nodemap[node]
474         except KeyError:
475             raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
476     def linkrev(self, node):
477         return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
478     def parents(self, node):
479         if node == nullid: return (nullid, nullid)
480         r = self.rev(node)
481         d = self.index[r][-3:-1]
482         if self.version == REVLOGV0:
483             return d
484         return (self.node(d[0]), self.node(d[1]))
485     def parentrevs(self, rev):
486         if rev == nullrev:
487             return (nullrev, nullrev)
488         d = self.index[rev][-3:-1]
489         if self.version == REVLOGV0:
490             return (self.rev(d[0]), self.rev(d[1]))
491         return d
492     def start(self, rev):
493         if rev == nullrev:
494             return 0
495         if self.version != REVLOGV0:
496             return self.ngoffset(self.index[rev][0])
497         return self.index[rev][0]
498 
499     def end(self, rev): return self.start(rev) + self.length(rev)
500 
501     def size(self, rev):
502         """return the length of the uncompressed text for a given revision"""
503         if rev == nullrev:
504             return 0
505         l = -1
506         if self.version != REVLOGV0:
507             l = self.index[rev][2]
508         if l >= 0:
509             return l
510 
511         t = self.revision(self.node(rev))
512         return len(t)
513 
514         # alternate implementation. The advantage of this code is that it
515         # will be faster for a single revision. But, the results are not
516         # cached, so finding the size of every revision will be slower.
517         """
518         if self.cache and self.cache[1] == rev:
519             return len(self.cache[2])
520 
521         base = self.base(rev)
522         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
523             base = self.cache[1]
524             text = self.cache[2]
525         else:
526             text = self.revision(self.node(base))
527 
528         l = len(text)
529         for x in xrange(base + 1, rev + 1):
530             l = mdiff.patchedsize(l, self.chunk(x))
531         return l
532         """
533 
534     def length(self, rev):
535         if rev == nullrev:
536             return 0
537         else:
538             return self.index[rev][1]
539     def base(self, rev):
540         if (rev == nullrev):
541             return nullrev
542         else:
543             return self.index[rev][-5]
544 
545     def reachable(self, node, stop=None):
546         """return a hash of all nodes ancestral to a given node, including
547         the node itself, stopping when stop is matched"""
548         reachable = {}
549         visit = [node]
550         reachable[node] = 1
551         if stop:
552             stopn = self.rev(stop)
553         else:
554             stopn = 0
555         while visit:
556             n = visit.pop(0)
557             if n == stop:
558                 continue
559             if n == nullid:
560                 continue
561             for p in self.parents(n):
562                 if self.rev(p) < stopn:
563                     continue
564                 if p not in reachable:
565                     reachable[p] = 1
566                     visit.append(p)
567         return reachable
568 
569     def nodesbetween(self, roots=None, heads=None):
570         """Return a tuple containing three elements. Elements 1 and 2 contain
571         a final list of bases and heads after all the unreachable ones have been
572         pruned. Element 0 contains a topologically sorted list of all
573 
574         nodes that satisfy these constraints:
575         1. All nodes must be descended from a node in roots (the nodes on
576         roots are considered descended from themselves).
577         2. All nodes must also be ancestors of a node in heads (the nodes in
578         heads are considered to be their own ancestors).
579 
580         If roots is unspecified, nullid is assumed as the only root.
581         If heads is unspecified, it is taken to be the output of the
582         heads method (i.e. a list of all nodes in the repository that
583         have no children)."""
584         nonodes = ([], [], [])
585         if roots is not None:
586             roots = list(roots)
587             if not roots:
588                 return nonodes
589             lowestrev = min([self.rev(n) for n in roots])
590         else:
591             roots = [nullid] # Everybody's a descendent of nullid
592             lowestrev = nullrev
593         if (lowestrev == nullrev) and (heads is None):
594             # We want _all_ the nodes!
595             return ([self.node(r) for r in xrange(0, self.count())],
596                     [nullid], list(self.heads()))
597         if heads is None:
598             # All nodes are ancestors, so the latest ancestor is the last
599             # node.
600             highestrev = self.count() - 1
601             # Set ancestors to None to signal that every node is an ancestor.
602             ancestors = None
603             # Set heads to an empty dictionary for later discovery of heads
604             heads = {}
605         else:
606             heads = list(heads)
607             if not heads:
608                 return nonodes
609             ancestors = {}
610             # Turn heads into a dictionary so we can remove 'fake' heads.
611             # Also, later we will be using it to filter out the heads we can't
612             # find from roots.
613             heads = dict.fromkeys(heads, 0)
614             # Start at the top and keep marking parents until we're done.
615             nodestotag = heads.keys()
616             # Remember where the top was so we can use it as a limit later.
617             highestrev = max([self.rev(n) for n in nodestotag])
618             while nodestotag:
619                 # grab a node to tag
620                 n = nodestotag.pop()
621                 # Never tag nullid
622                 if n == nullid:
623                     continue
624                 # A node's revision number represents its place in a
625                 # topologically sorted list of nodes.
626                 r = self.rev(n)
627                 if r >= lowestrev:
628                     if n not in ancestors:
629                         # If we are possibly a descendent of one of the roots
630                         # and we haven't already been marked as an ancestor
631                         ancestors[n] = 1 # Mark as ancestor
632                         # Add non-nullid parents to list of nodes to tag.
633                         nodestotag.extend([p for p in self.parents(n) if
634                                            p != nullid])
635                     elif n in heads: # We've seen it before, is it a fake head?
636                         # So it is, real heads should not be the ancestors of
637                         # any other heads.
638                         heads.pop(n)
639             if not ancestors:
640                 return nonodes
641             # Now that we have our set of ancestors, we want to remove any
642             # roots that are not ancestors.
643 
644             # If one of the roots was nullid, everything is included anyway.
645             if lowestrev > nullrev:
646                 # But, since we weren't, let's recompute the lowest rev to not
647                 # include roots that aren't ancestors.
648 
649                 # Filter out roots that aren't ancestors of heads
650                 roots = [n for n in roots if n in ancestors]
651                 # Recompute the lowest revision
652                 if roots:
653                     lowestrev = min([self.rev(n) for n in roots])
654                 else:
655                     # No more roots? Return empty list
656                     return nonodes
657             else:
658                 # We are descending from nullid, and don't need to care about
659                 # any other roots.
660                 lowestrev = nullrev
661                 roots = [nullid]
662         # Transform our roots list into a 'set' (i.e. a dictionary where the
663         # values don't matter).
664         descendents = dict.fromkeys(roots, 1)
665         # Also, keep the original roots so we can filter out roots that aren't
666         # 'real' roots (i.e. are descended from other roots).
667         roots = descendents.copy()
668         # Our topologically sorted list of output nodes.
669         orderedout = []
670         # Don't start at nullid since we don't want nullid in our output list,
671         # and if nullid shows up in descendents, empty parents will look like
672         # they're descendents.
673         for r in xrange(max(lowestrev, 0), highestrev + 1):
674             n = self.node(r)
675             isdescendent = False
676             if lowestrev == nullrev: # Everybody is a descendent of nullid
677                 isdescendent = True
678             elif n in descendents:
679                 # n is already a descendent
680                 isdescendent = True
681                 # This check only needs to be done here because all the roots
682                 # will start being marked as descendents before the loop.
683                 if n in roots:
684                     # If n was a root, check if it's a 'real' root.
685                     p = tuple(self.parents(n))
686                     # If any of its parents are descendents, it's not a root.
687                     if (p[0] in descendents) or (p[1] in descendents):
688                         roots.pop(n)
689             else:
690                 p = tuple(self.parents(n))
691                 # A node is a descendent if either of its parents are
692                 # descendents. (We seeded the descendents list with the roots
693                 # up there, remember?)
694                 if (p[0] in descendents) or (p[1] in descendents):
695                     descendents[n] = 1
696                     isdescendent = True
697             if isdescendent and ((ancestors is None) or (n in ancestors)):
698                 # Only include nodes that are both descendents and ancestors.
699                 orderedout.append(n)
700                 if (ancestors is not None) and (n in heads):
701                     # We're trying to figure out which heads are reachable
702                     # from roots.
703                     # Mark this head as having been reached
704                     heads[n] = 1
705                 elif ancestors is None:
706                     # Otherwise, we're trying to discover the heads.
707                     # Assume this is a head because if it isn't, the next step
708                     # will eventually remove it.
709                     heads[n] = 1
710                     # But, obviously its parents aren't.
711                     for p in self.parents(n):
712                         heads.pop(p, None)
713         heads = [n for n in heads.iterkeys() if heads[n] != 0]
714         roots = roots.keys()
715         assert orderedout
716         assert roots
717         assert heads
718         return (orderedout, roots, heads)
  719 
- 720     def heads(self, start=None):
+ 720     def heads(self, start=None, stop=None):
  721         """return the list of all nodes that have no children
  722 
  723         if start is specified, only heads that are descendants of
  724         start will be returned
- 725 
+ 725         if stop is specified, it will consider all the revs from stop
+ 726         as if they had no children
  727         """
  728         if start is None:
  729             start = nullid
+ 730         if stop is None:
+ 731             stop = []
+ 732         stoprevs = dict.fromkeys([self.rev(n) for n in stop])
  733         startrev = self.rev(start)
  734         reachable = {startrev: 1}
  735         heads = {startrev: 1}
  736 
  737         parentrevs = self.parentrevs
  738         for r in xrange(startrev + 1, self.count()):
  739             for p in parentrevs(r):
  740                 if p in reachable:
- 737                     reachable[r] = 1
+ 741                     if r not in stoprevs:
+ 742                         reachable[r] = 1
  743                     heads[r] = 1
- 739                 if p in heads:
+ 744                 if p in heads and p not in stoprevs:
  745                     del heads[p]
+ 746 
  747         return [self.node(r) for r in heads]
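To see what the new stop argument changes (the point of this changeset), here is a toy, self-contained version of the loop above over a plain parent table; toyheads and the sample graph are illustrative, not Mercurial API:

def toyheads(parents, startrev=0, stoprevs=()):
    # parents[r] lists the parent revs of r; revs in stoprevs are treated
    # as if they had no children, exactly as in heads() above
    stoprevs = dict.fromkeys(stoprevs)
    reachable = {startrev: 1}
    heads = {startrev: 1}
    for r in xrange(startrev + 1, len(parents)):
        for p in parents[r]:
            if p in reachable:
                if r not in stoprevs:
                    reachable[r] = 1
                heads[r] = 1
            if p in heads and p not in stoprevs:
                del heads[p]
    return sorted(heads)

# revisions: 0 -- 1 -- 2, plus a branch 0 -- 3
parents = [(), (0,), (1,), (0,)]
assert toyheads(parents) == [2, 3]
# stopping at rev 1 hides its descendant 2 and leaves 1 itself as a head
assert toyheads(parents, stoprevs=[1]) == [1, 3]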
742
748
743 def children(self, node):
749 def children(self, node):
744 """find the children of a given node"""
750 """find the children of a given node"""
745 c = []
751 c = []
746 p = self.rev(node)
752 p = self.rev(node)
747 for r in range(p + 1, self.count()):
753 for r in range(p + 1, self.count()):
748 for pr in self.parentrevs(r):
754 for pr in self.parentrevs(r):
749 if pr == p:
755 if pr == p:
750 c.append(self.node(r))
756 c.append(self.node(r))
751 return c
757 return c
752
758
753 def _match(self, id):
759 def _match(self, id):
754 if isinstance(id, (long, int)):
760 if isinstance(id, (long, int)):
755 # rev
761 # rev
756 return self.node(id)
762 return self.node(id)
757 if len(id) == 20:
763 if len(id) == 20:
758 # possibly a binary node
764 # possibly a binary node
759 # odds of a binary node being all hex in ASCII are 1 in 10**25
765 # odds of a binary node being all hex in ASCII are 1 in 10**25
760 try:
766 try:
761 node = id
767 node = id
762 r = self.rev(node) # quick search the index
768 r = self.rev(node) # quick search the index
763 return node
769 return node
764 except RevlogError:
770 except RevlogError:
765 pass # may be partial hex id
771 pass # may be partial hex id
766 try:
772 try:
767 # str(rev)
773 # str(rev)
768 rev = int(id)
774 rev = int(id)
769 if str(rev) != id: raise ValueError
775 if str(rev) != id: raise ValueError
770 if rev < 0: rev = self.count() + rev
776 if rev < 0: rev = self.count() + rev
771 if rev < 0 or rev >= self.count(): raise ValueError
777 if rev < 0 or rev >= self.count(): raise ValueError
772 return self.node(rev)
778 return self.node(rev)
773 except (ValueError, OverflowError):
779 except (ValueError, OverflowError):
774 pass
780 pass
775 if len(id) == 40:
781 if len(id) == 40:
776 try:
782 try:
777 # a full hex nodeid?
783 # a full hex nodeid?
778 node = bin(id)
784 node = bin(id)
779 r = self.rev(node)
785 r = self.rev(node)
780 return node
786 return node
781 except TypeError:
787 except TypeError:
782 pass
788 pass
783
789
    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
                node = None
                for n in self.nodemap:
                    if n.startswith(bin_id) and hex(n).startswith(id):
                        if node is not None:
                            raise RevlogError(_("Ambiguous identifier"))
                        node = n
                if node is not None:
                    return node
            except TypeError:
                pass

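_partialmatch() converts the even-length part of the prefix to binary once (len(id) & ~1 drops an odd trailing digit), so the scan over nodemap compares cheap byte prefixes and only calls hex() to check that last digit; it also insists the prefix be unique. The same contract over a plain list of hex ids (ids below are made up for illustration):

    def partial_lookup(prefix, hex_nodes):
        matches = [n for n in hex_nodes if n.startswith(prefix)]
        if len(matches) > 1:
            raise ValueError("Ambiguous identifier")
        return matches[0] if matches else None

    nodes = ["27230c29aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
             "2723ffeebbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"]
    print(partial_lookup("27230", nodes))   # unique: the first id
    try:
        partial_lookup("2723", nodes)
    except ValueError as e:
        print(e)                            # Ambiguous identifier
    print(partial_lookup("beef", nodes))    # None: lookup() then gives up
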
    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """

        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise RevlogError(_("No match found"))

    def cmp(self, node, text):
        """compare text with a given file revision"""
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def makenode(self, node, text):
        """calculate a file nodeid for text, descended or possibly
        unchanged from node"""

        if self.cmp(node, text):
            return hash(text, node, nullid)
        return node

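cmp() works because a revlog nodeid is a hash over the text plus its parents: in this era of the code the module-level hash() is sha1 over the two parent nodeids in sorted order followed by the text, so an equal hash means the stored revision already matches. A self-contained equivalent:

    from hashlib import sha1

    nullid = b"\0" * 20

    def node_hash(text, p1, p2):
        # sha1(min(p1, p2) + max(p1, p2) + text), as revlog's hash() computes
        s = sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.digest()

    node = node_hash(b"hello\n", nullid, nullid)
    print(node_hash(b"hello\n", nullid, nullid) == node)  # True: unchanged text
    print(node_hash(b"bye\n", node, nullid) == node)      # False: cmp() reports a change
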
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

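chunk() serves compressed deltas out of a single (start, data) read window, reading at least cachelen bytes so neighbouring chunks hit the cache; for inline revlogs the offset must also skip the index entries interleaved before each chunk. The caching pattern, reduced to an ordinary file (class and names are mine, not revlog's):

    class WindowedReader:
        def __init__(self, path, cachelen=4096):
            self.path = path
            self.cachelen = cachelen
            self.cache = None                        # (start, bytes)

        def read(self, start, length):
            end = start + length
            if self.cache:
                cstart = self.cache[0]
                cend = cstart + len(self.cache[1])
                if start >= cstart and end <= cend:  # cache hit
                    off = start - cstart
                    return self.cache[1][off:off + length]
            with open(self.path, "rb") as f:         # miss: refill the window
                f.seek(start)
                self.cache = (start, f.read(max(self.cachelen, length)))
            return self.cache[1][:length]

    # r = WindowedReader("some.d"); r.read(0, 10); r.read(4, 6)  # second read hits cache
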
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

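revision() starts from the full snapshot at base(rev), or from a cached intermediate text on the same delta chain, applies each stored delta in order, and re-hashes the result against the parents as an integrity check. A toy version of that loop, with a simplified splice-list delta standing in for mdiff's binary patch format:

    def apply_patch(text, delta):
        # delta is a list of (start, end, replacement) splices applied in order;
        # this stands in for mdiff.patches, which uses a packed binary form
        for start, end, repl in delta:
            text = text[:start] + repl + text[end:]
        return text

    def rebuild(snapshot, deltas):
        # snapshot is the full text at base(rev); deltas[i] turns the text at
        # base + i into the text at base + i + 1, like the loop in revision()
        text = snapshot
        for d in deltas:
            text = apply_patch(text, d)
        return text

    print(rebuild("v1\n", [[(0, 2, "v2")], [(0, 2, "v3")]]))   # "v3\n"
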
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

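The offset arithmetic above is the subtle part: in an inline revlog, index entries and data chunks are interleaved in one file, so rev r's data sits at start(r) plus the r + 1 index entries that precede it. A sketch (the format string is only a plausible stand-in for self.indexformat):

    import struct

    # a 64-byte RevlogNG-style entry format, used here only as a stand-in
    INDEXFORMAT = ">Qiiiiii20s12x"

    def inline_chunk_offset(data_start, rev):
        # rev's data is preceded by rev + 1 interleaved index entries
        return data_start + (rev + 1) * struct.calcsize(INDEXFORMAT)

    print(inline_chunk_offset(0, 0))   # 64: the first chunk sits after one entry
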
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if not self.inlinedata():
            dfh = self.opener(self.datafile, "a")
        else:
            dfh = None
        ifh = self.opener(self.indexfile, "a+")
        return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
        else:
            ifh.seek(0, 2)
            transaction.add(self.indexfile, ifh.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            ifh.write(l)
            entry = entry[4:]

        ifh.write(entry)

        if self.inlinedata():
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self.cache = (node, n, text)
        return node

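The dist > len(text) * 2 test is the storage policy: dist is how many bytes a reader would have to pull (the whole chain from the base snapshot through the new delta), and once that exceeds twice the uncompressed text, a fresh full version is cheaper. As a toy policy function:

    def store_as_snapshot(chain_bytes, delta_len, text_len):
        # chain_bytes covers base(t)..end(t); mirrors dist = end - start + l
        return chain_bytes + delta_len > 2 * text_len

    print(store_as_snapshot(900, 150, 1000))   # False: keep appending deltas
    print(store_as_snapshot(1900, 150, 1000))  # True: write a full version
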
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

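The real ancestor.ancestor performs an incremental depth-ranked search; a much smaller set-intersection version yields one maximal common ancestor whenever parents carry lower revs than their children, an assumption revlogs satisfy (this is a simplified stand-in, not the module's algorithm):

    def ancestors(rev, parents):
        # collect all ancestors of rev, rev included
        seen, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents(r))
        return seen

    def lca(a, b, parents):
        common = ancestors(a, parents) & ancestors(b, parents)
        return max(common) if common else None

    # 0 -- 1 -- 2, and 1 -- 3: the common ancestor of 2 and 3 is 1
    parents = {0: [], 1: [0], 2: [1], 3: [1]}.__getitem__
    print(lca(2, 3, parents))   # 1
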
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

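The chunks yielded here are framed by changegroup.genchunk: as far as I can tell from this era of the format, each chunk carries a 4-byte big-endian length that includes the length field itself, a zero-length chunk (closechunk) terminates the group, and each payload is the 80-byte meta block (node, p1, p2, cs) followed by the delta. A round-trip sketch of that framing:

    import struct
    import io

    def genchunk(data):
        return struct.pack(">l", len(data) + 4) + data

    closechunk = struct.pack(">l", 0)

    def iterchunks(stream):
        while True:
            l = struct.unpack(">l", stream.read(4))[0]
            if l <= 4:
                break                      # zero-length chunk: end of group
            yield stream.read(l - 4)

    buf = io.BytesIO(genchunk(b"meta+delta") + closechunk)
    print(list(iterchunks(buf)))           # [b'meta+delta']
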
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self._addrevision(text, transaction, link, p1, p2, None,
                                        ifh, dfh)
                if not dfh and not self.inlinedata():
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        return node

    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

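strip() cuts the on-disk files before touching memory, and the two layouts need different cut points: a separate-data revlog truncates the data file at start(rev) and the index at rev whole entries, while an inline revlog interleaves both in one file and cuts where rev's index entry would begin. Mirrored by this sketch (the entry format is a stand-in):

    import struct

    def truncate_offsets(rev, data_start, entry_size, inline):
        # keep revisions 0 .. rev - 1, as strip() computes its cut points
        if inline:
            # entries and data share one file; cut at rev's index entry
            return {"index": data_start + rev * entry_size}
        return {"data": data_start, "index": rev * entry_size}

    entry = struct.calcsize(">Qiiiiii20s12x")             # stand-in: 64 bytes
    print(truncate_offsets(2, 300, entry, inline=False))  # {'data': 300, 'index': 128}
    print(truncate_offsets(2, 300, entry, inline=True))   # {'index': 428}
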
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

@@ -1,57 +1,62 @@
#!/bin/sh

mkdir a
cd a
hg init
echo foo > t1
hg add t1
hg commit -m "1" -d "1000000 0"

cd ..
hg clone a b

cd a
echo foo > t2
hg add t2
hg commit -m "2" -d "1000000 0"

cd ../b
echo foo > t3
hg add t3
hg commit -m "3" -d "1000000 0"

hg push ../a
hg pull ../a
hg push ../a
hg merge
hg commit -m "4" -d "1000000 0"
hg push ../a
cd ..

hg init c
cd c
for i in 0 1 2; do
    echo $i >> foo
    hg ci -Am $i -d "1000000 0"
done
cd ..

hg clone c d
cd d
for i in 0 1; do
    hg co -C $i
    echo d-$i >> foo
    hg ci -m d-$i -d "1000000 0"
done

HGMERGE=true hg merge 3
hg ci -m c-d -d "1000000 0"

hg push ../c; echo $?
hg push -r 2 ../c; echo $?
hg push -r 3 ../c; echo $?
hg push -r 3 -r 4 ../c; echo $?
hg push -f -r 3 -r 4 ../c; echo $?
hg push -r 5 ../c; echo $?

+# issue 450
+hg init ../e
+hg push -r 0 ../e ; echo $?
+hg push -r 1 ../e ; echo $?
+
exit 0
@@ -1,64 +1,78 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
pushing to ../a
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
pulling from ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
pushing to ../a
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 1 changes to 1 files
adding foo
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
merging foo
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
no changes found
0
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files (+2 heads)
0
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (-1 heads)
0
+pushing to ../e
+searching for changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+0
+pushing to ../e
+searching for changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+0