##// END OF EJS Templates
Add branch name to editor text.
Simon 'corecode' Schubert -
r4020:dbf250b8 default
parent child Browse files
Show More
@@ -1,1882 +1,1884 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        When path is None, the root is found by walking up from the
        current directory until a '.hg' directory appears.  Raises
        repo.RepoError when no repository is found, when create is given
        for an existing repository, or when an on-disk requirement is
        not in self.supported.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from cwd looking for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)     # opens files under .hg
        self.wopener = util.opener(self.root)    # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the 'store' requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # revlog format/flags come from the [revlog] config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink detection helper; a constant False on platforms
        # without symlink support
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129
129
130 def url(self):
130 def url(self):
131 return 'file:' + self.root
131 return 'file:' + self.root
132
132
    def hook(self, name, throw=False, **args):
        """Run every configured hook matching 'name'; return True if any
        failed.

        Hooks come from the [hooks] config section ('name' or
        'name.suffix'), run in sorted order.  'python:mod.func' entries
        are imported and called; anything else runs as a shell command
        with args exported as HG_* environment variables.  With
        throw=True a failure raises util.Abort instead of warning.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: run cmd from the repo root with HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
213
213
214 tag_disallowed = ':\r\n'
214 tag_disallowed = ':\r\n'
215
215
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject names containing any character from tag_disallowed
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse when .hgtags appears in any of the first five status
        # lists (modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags is untracked: schedule it for addition
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
259
259
260 def tags(self):
260 def tags(self):
261 '''return a mapping of tag to node'''
261 '''return a mapping of tag to node'''
262 if not self.tagscache:
262 if not self.tagscache:
263 self.tagscache = {}
263 self.tagscache = {}
264
264
265 def parsetag(line, context):
265 def parsetag(line, context):
266 if not line:
266 if not line:
267 return
267 return
268 s = l.split(" ", 1)
268 s = l.split(" ", 1)
269 if len(s) != 2:
269 if len(s) != 2:
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 return
271 return
272 node, key = s
272 node, key = s
273 key = util.tolocal(key.strip()) # stored in UTF-8
273 key = util.tolocal(key.strip()) # stored in UTF-8
274 try:
274 try:
275 bin_n = bin(node)
275 bin_n = bin(node)
276 except TypeError:
276 except TypeError:
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 (context, node))
278 (context, node))
279 return
279 return
280 if bin_n not in self.changelog.nodemap:
280 if bin_n not in self.changelog.nodemap:
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 (context, key))
282 (context, key))
283 return
283 return
284 self.tagscache[key] = bin_n
284 self.tagscache[key] = bin_n
285
285
286 # read the tags file from each head, ending with the tip,
286 # read the tags file from each head, ending with the tip,
287 # and add each tag found to the map, with "newer" ones
287 # and add each tag found to the map, with "newer" ones
288 # taking precedence
288 # taking precedence
289 f = None
289 f = None
290 for rev, node, fnode in self._hgtagsnodes():
290 for rev, node, fnode in self._hgtagsnodes():
291 f = (f and f.filectx(fnode) or
291 f = (f and f.filectx(fnode) or
292 self.filectx('.hgtags', fileid=fnode))
292 self.filectx('.hgtags', fileid=fnode))
293 count = 0
293 count = 0
294 for l in f.data().splitlines():
294 for l in f.data().splitlines():
295 count += 1
295 count += 1
296 parsetag(l, _("%s, line %d") % (str(f), count))
296 parsetag(l, _("%s, line %d") % (str(f), count))
297
297
298 try:
298 try:
299 f = self.opener("localtags")
299 f = self.opener("localtags")
300 count = 0
300 count = 0
301 for l in f:
301 for l in f:
302 # localtags are stored in the local character set
302 # localtags are stored in the local character set
303 # while the internal tag table is stored in UTF-8
303 # while the internal tag table is stored in UTF-8
304 l = util.fromlocal(l)
304 l = util.fromlocal(l)
305 count += 1
305 count += 1
306 parsetag(l, _("localtags, line %d") % count)
306 parsetag(l, _("localtags, line %d") % count)
307 except IOError:
307 except IOError:
308 pass
308 pass
309
309
310 self.tagscache['tip'] = self.changelog.tip()
310 self.tagscache['tip'] = self.changelog.tip()
311
311
312 return self.tagscache
312 return self.tagscache
313
313
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for the .hgtags file of each head.

        Heads are processed oldest-first (ending at tip); when several
        heads share the same .hgtags filenode, only the newest entry is
        kept, so callers reading in order see newer tags last (and they
        take precedence).
        """
        heads = self.heads()
        heads.reverse()
        last = {}       # .hgtags filenode -> index of latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags content seen before: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        # compact out the entries nulled above
        return [item for item in ret if item]
331
331
332 def tagslist(self):
332 def tagslist(self):
333 '''return a list of tags ordered by revision'''
333 '''return a list of tags ordered by revision'''
334 l = []
334 l = []
335 for t, n in self.tags().items():
335 for t, n in self.tags().items():
336 try:
336 try:
337 r = self.changelog.rev(n)
337 r = self.changelog.rev(n)
338 except:
338 except:
339 r = -2 # sort to the beginning of the list if unknown
339 r = -2 # sort to the beginning of the list if unknown
340 l.append((r, t, n))
340 l.append((r, t, n))
341 l.sort()
341 l.sort()
342 return [(t, n) for r, t, n in l]
342 return [(t, n) for r, t, n in l]
343
343
344 def nodetags(self, node):
344 def nodetags(self, node):
345 '''return the tags associated with a node'''
345 '''return the tags associated with a node'''
346 if not self.nodetagscache:
346 if not self.nodetagscache:
347 self.nodetagscache = {}
347 self.nodetagscache = {}
348 for t, n in self.tags().items():
348 for t, n in self.tags().items():
349 self.nodetagscache.setdefault(n, []).append(t)
349 self.nodetagscache.setdefault(n, []).append(t)
350 return self.nodetagscache.get(node, [])
350 return self.nodetagscache.get(node, [])
351
351
352 def _branchtags(self):
352 def _branchtags(self):
353 partial, last, lrev = self._readbranchcache()
353 partial, last, lrev = self._readbranchcache()
354
354
355 tiprev = self.changelog.count() - 1
355 tiprev = self.changelog.count() - 1
356 if lrev != tiprev:
356 if lrev != tiprev:
357 self._updatebranchcache(partial, lrev+1, tiprev+1)
357 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 self._writebranchcache(partial, self.changelog.tip(), tiprev)
358 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359
359
360 return partial
360 return partial
361
361
362 def branchtags(self):
362 def branchtags(self):
363 if self.branchcache is not None:
363 if self.branchcache is not None:
364 return self.branchcache
364 return self.branchcache
365
365
366 self.branchcache = {} # avoid recursion in changectx
366 self.branchcache = {} # avoid recursion in changectx
367 partial = self._branchtags()
367 partial = self._branchtags()
368
368
369 # the branch cache is stored on disk as UTF-8, but in the local
369 # the branch cache is stored on disk as UTF-8, but in the local
370 # charset internally
370 # charset internally
371 for k, v in partial.items():
371 for k, v in partial.items():
372 self.branchcache[util.tolocal(k)] = v
372 self.branchcache[util.tolocal(k)] = v
373 return self.branchcache
373 return self.branchcache
374
374
    def _readbranchcache(self):
        """Read branches.cache; return (partial, last, lrev).

        partial maps branch name -> node; last/lrev identify the tip the
        cache was valid for.  Any failure (missing file, parse error,
        stale tip) deliberately yields an empty cache so it gets rebuilt.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # header line: '<hex tip node> <tip rev>'
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # broad on purpose: any corruption just resets the cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398
398
399 def _writebranchcache(self, branches, tip, tiprev):
399 def _writebranchcache(self, branches, tip, tiprev):
400 try:
400 try:
401 f = self.opener("branches.cache", "w")
401 f = self.opener("branches.cache", "w")
402 f.write("%s %s\n" % (hex(tip), tiprev))
402 f.write("%s %s\n" % (hex(tip), tiprev))
403 for label, node in branches.iteritems():
403 for label, node in branches.iteritems():
404 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
405 except IOError:
405 except IOError:
406 pass
406 pass
407
407
408 def _updatebranchcache(self, partial, start, end):
408 def _updatebranchcache(self, partial, start, end):
409 for r in xrange(start, end):
409 for r in xrange(start, end):
410 c = self.changectx(r)
410 c = self.changectx(r)
411 b = c.branch()
411 b = c.branch()
412 if b:
412 if b:
413 partial[b] = c.node()
413 partial[b] = c.node()
414
414
415 def lookup(self, key):
415 def lookup(self, key):
416 if key == '.':
416 if key == '.':
417 key = self.dirstate.parents()[0]
417 key = self.dirstate.parents()[0]
418 if key == nullid:
418 if key == nullid:
419 raise repo.RepoError(_("no revision checked out"))
419 raise repo.RepoError(_("no revision checked out"))
420 elif key == 'null':
420 elif key == 'null':
421 return nullid
421 return nullid
422 n = self.changelog._match(key)
422 n = self.changelog._match(key)
423 if n:
423 if n:
424 return n
424 return n
425 if key in self.tags():
425 if key in self.tags():
426 return self.tags()[key]
426 return self.tags()[key]
427 if key in self.branchtags():
427 if key in self.branchtags():
428 return self.branchtags()[key]
428 return self.branchtags()[key]
429 n = self.changelog._partialmatch(key)
429 n = self.changelog._partialmatch(key)
430 if n:
430 if n:
431 return n
431 return n
432 raise repo.RepoError(_("unknown revision '%s'") % key)
432 raise repo.RepoError(_("unknown revision '%s'") % key)
433
433
434 def dev(self):
434 def dev(self):
435 return os.lstat(self.path).st_dev
435 return os.lstat(self.path).st_dev
436
436
437 def local(self):
437 def local(self):
438 return True
438 return True
439
439
440 def join(self, f):
440 def join(self, f):
441 return os.path.join(self.path, f)
441 return os.path.join(self.path, f)
442
442
443 def sjoin(self, f):
443 def sjoin(self, f):
444 f = self.encodefn(f)
444 f = self.encodefn(f)
445 return os.path.join(self.spath, f)
445 return os.path.join(self.spath, f)
446
446
447 def wjoin(self, f):
447 def wjoin(self, f):
448 return os.path.join(self.root, f)
448 return os.path.join(self.root, f)
449
449
450 def file(self, f):
450 def file(self, f):
451 if f[0] == '/':
451 if f[0] == '/':
452 f = f[1:]
452 f = f[1:]
453 return filelog.filelog(self.sopener, f, self.revlogversion)
453 return filelog.filelog(self.sopener, f, self.revlogversion)
454
454
455 def changectx(self, changeid=None):
455 def changectx(self, changeid=None):
456 return context.changectx(self, changeid)
456 return context.changectx(self, changeid)
457
457
458 def workingctx(self):
458 def workingctx(self):
459 return context.workingctx(self)
459 return context.workingctx(self)
460
460
461 def parents(self, changeid=None):
461 def parents(self, changeid=None):
462 '''
462 '''
463 get list of changectxs for parents of changeid or working directory
463 get list of changectxs for parents of changeid or working directory
464 '''
464 '''
465 if changeid is None:
465 if changeid is None:
466 pl = self.dirstate.parents()
466 pl = self.dirstate.parents()
467 else:
467 else:
468 n = self.changelog.lookup(changeid)
468 n = self.changelog.lookup(changeid)
469 pl = self.changelog.parents(n)
469 pl = self.changelog.parents(n)
470 if pl[1] == nullid:
470 if pl[1] == nullid:
471 return [self.changectx(pl[0])]
471 return [self.changectx(pl[0])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
473
473
474 def filectx(self, path, changeid=None, fileid=None):
474 def filectx(self, path, changeid=None, fileid=None):
475 """changeid can be a changeset revision, node, or tag.
475 """changeid can be a changeset revision, node, or tag.
476 fileid can be a file revision or node."""
476 fileid can be a file revision or node."""
477 return context.filectx(self, path, changeid, fileid)
477 return context.filectx(self, path, changeid, fileid)
478
478
479 def getcwd(self):
479 def getcwd(self):
480 return self.dirstate.getcwd()
480 return self.dirstate.getcwd()
481
481
482 def wfile(self, f, mode='r'):
482 def wfile(self, f, mode='r'):
483 return self.wopener(f, mode)
483 return self.wopener(f, mode)
484
484
485 def _filter(self, filter, filename, data):
485 def _filter(self, filter, filename, data):
486 if filter not in self.filterpats:
486 if filter not in self.filterpats:
487 l = []
487 l = []
488 for pat, cmd in self.ui.configitems(filter):
488 for pat, cmd in self.ui.configitems(filter):
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 l.append((mf, cmd))
490 l.append((mf, cmd))
491 self.filterpats[filter] = l
491 self.filterpats[filter] = l
492
492
493 for mf, cmd in self.filterpats[filter]:
493 for mf, cmd in self.filterpats[filter]:
494 if mf(filename):
494 if mf(filename):
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 data = util.filter(data, cmd)
496 data = util.filter(data, cmd)
497 break
497 break
498
498
499 return data
499 return data
500
500
501 def wread(self, filename):
501 def wread(self, filename):
502 if self._link(filename):
502 if self._link(filename):
503 data = os.readlink(self.wjoin(filename))
503 data = os.readlink(self.wjoin(filename))
504 else:
504 else:
505 data = self.wopener(filename, 'r').read()
505 data = self.wopener(filename, 'r').read()
506 return self._filter("encode", filename, data)
506 return self._filter("encode", filename, data)
507
507
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory after the
        'decode' filters.

        With 'l' in flags a symlink pointing at data is created
        (replacing any existing file); otherwise a regular file is
        written (replacing any existing symlink) and 'x' in flags sets
        the executable bit.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            try:
                os.unlink(self.wjoin(filename))
            except OSError:
                # nothing to remove
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # an existing symlink must go before writing a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
524
524
525 def wwritedata(self, filename, data):
525 def wwritedata(self, filename, data):
526 return self._filter("decode", filename, data)
526 return self._filter("decode", filename, data)
527
527
    def transaction(self):
        """Start a new transaction, or nest into a running one.

        The current dirstate is saved to journal.dirstate so rollback()
        can restore it; when the transaction completes, aftertrans
        renames the journal files to undo.* for a later rollback.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already active: nest inside it
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
547
547
    def recover(self):
        """Roll back an interrupted transaction, if one is present.

        Returns True when a journal file was found and rolled back,
        False (with a warning) otherwise.
        """
        l = self.lock()  # hold the store lock for the whole rollback
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()  # in-memory caches are stale after rollback
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
558
558
    def rollback(self, wlock=None):
        """Undo the last committed transaction.

        Restores the store from the undo journal and the dirstate from
        undo.dirstate, then reloads all in-memory state.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh changelog/manifest caches and re-read the dirstate
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
    def wreload(self):
        # re-read the working-directory state (dirstate) from disk
        self.dirstate.read()
574
574
    def reload(self):
        """Re-read store data from disk and drop derived caches."""
        self.changelog.load()
        self.manifest.load()
        # tag caches are derived from the changelog; invalidate them so
        # they are rebuilt lazily on next access
        self.tagscache = None
        self.nodetagscache = None
580
580
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname.

        First attempts a non-blocking acquire.  If the lock is held and
        wait is true, warn who holds it and retry with the configured
        ui.timeout (default 600s); otherwise re-raise LockHeld.
        acquirefn, if given, runs once after the lock is obtained.
        Returns the lock object (released when garbage-collected or
        explicitly).
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
596
596
    def lock(self, wait=1):
        # store lock: guards the changelog/manifest/filelogs; caches
        # are reloaded once the lock is acquired
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
600
600
    def wlock(self, wait=1):
        # working-directory lock: the dirstate is written back when the
        # lock is released and re-read when it is acquired
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
605
605
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Determines the file's filelog parents (fp1/fp2), attaching
        rename metadata recorded in the dirstate when present, and adds
        a new filelog revision.  Returns the resulting filelog node.
        fn is appended to changelist only when a new revision is
        actually created (i.e. the file differs from its parent).
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed from cp; record the source
            # path and revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                self.ui.debug(_(" %s: copy %s:%s\n") %
                              (fn, cp, meta["copyrev"]))
                fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
647
647
648 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
648 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
649 if p1 is None:
649 if p1 is None:
650 p1, p2 = self.dirstate.parents()
650 p1, p2 = self.dirstate.parents()
651 return self.commit(files=files, text=text, user=user, date=date,
651 return self.commit(files=files, text=text, user=user, date=date,
652 p1=p1, p2=p2, wlock=wlock, extra=extra)
652 p1=p1, p2=p2, wlock=wlock, extra=extra)
653
653
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit changes to the repository.

        Returns the new changeset node, or None when there is nothing
        to commit (or the user supplied an empty message).  When p1 is
        given (rawcommit path) the dirstate is bypassed and files is
        taken as the literal file list; otherwise the file list comes
        from the dirstate/status.  An editor is launched when no text
        is supplied or force_editor is set.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # copy immediately so the caller's (possibly shared) dict is
        # never mutated below
        extra = extra.copy()

        # collect the lists of files to commit and to remove
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it already points at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a branch-name change alone is enough to allow a commit
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except OSError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: treat an unreadable file as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # assemble the template shown in the commit editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % branchname)
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty result aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
800
802
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        With node set, files come from that changeset's manifest;
        otherwise the dirstate walk is used.
        '''

        if node:
            # fdict tracks requested files not yet seen in the manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was never found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
834
836
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, mf):
            # compare working-dir contents of fn against its manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # comparing the working directory against its own parent?
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # no lock: proceed read-only and skip the dirstate
                    # fix-ups for clean files below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so the next
                                # status call can skip the compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # mf2[fn] == "" marks a working-dir pseudo entry;
                    # fall back to a content compare in that case
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
933
935
934 def add(self, list, wlock=None):
936 def add(self, list, wlock=None):
935 if not wlock:
937 if not wlock:
936 wlock = self.wlock()
938 wlock = self.wlock()
937 for f in list:
939 for f in list:
938 p = self.wjoin(f)
940 p = self.wjoin(f)
939 if not os.path.exists(p):
941 if not os.path.exists(p):
940 self.ui.warn(_("%s does not exist!\n") % f)
942 self.ui.warn(_("%s does not exist!\n") % f)
941 elif not os.path.isfile(p):
943 elif not os.path.isfile(p):
942 self.ui.warn(_("%s not added: only files supported currently\n")
944 self.ui.warn(_("%s not added: only files supported currently\n")
943 % f)
945 % f)
944 elif self.dirstate.state(f) in 'an':
946 elif self.dirstate.state(f) in 'an':
945 self.ui.warn(_("%s already tracked!\n") % f)
947 self.ui.warn(_("%s already tracked!\n") % f)
946 else:
948 else:
947 self.dirstate.update([f], "a")
949 self.dirstate.update([f], "a")
948
950
949 def forget(self, list, wlock=None):
951 def forget(self, list, wlock=None):
950 if not wlock:
952 if not wlock:
951 wlock = self.wlock()
953 wlock = self.wlock()
952 for f in list:
954 for f in list:
953 if self.dirstate.state(f) not in 'ai':
955 if self.dirstate.state(f) not in 'ai':
954 self.ui.warn(_("%s not added!\n") % f)
956 self.ui.warn(_("%s not added!\n") % f)
955 else:
957 else:
956 self.dirstate.forget([f])
958 self.dirstate.forget([f])
957
959
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal from the repository.

        With unlink=True the files are first deleted from the working
        directory (already-missing files are silently ignored).  Files
        still present on disk are warned about and left untracked;
        files that were only scheduled for add are simply forgotten.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
978
980
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal.

        Re-creates each file's contents and flags from the first
        dirstate parent's manifest and marks it normal again.  Files
        not in the removed state are warned about and skipped.
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")
992
994
993 def copy(self, source, dest, wlock=None):
995 def copy(self, source, dest, wlock=None):
994 p = self.wjoin(dest)
996 p = self.wjoin(dest)
995 if not os.path.exists(p):
997 if not os.path.exists(p):
996 self.ui.warn(_("%s does not exist!\n") % dest)
998 self.ui.warn(_("%s does not exist!\n") % dest)
997 elif not os.path.isfile(p):
999 elif not os.path.isfile(p):
998 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1000 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
999 else:
1001 else:
1000 if not wlock:
1002 if not wlock:
1001 wlock = self.wlock()
1003 wlock = self.wlock()
1002 if self.dirstate.state(dest) == '?':
1004 if self.dirstate.state(dest) == '?':
1003 self.dirstate.update([dest], "a")
1005 self.dirstate.update([dest], "a")
1004 self.dirstate.copy(source, dest)
1006 self.dirstate.copy(source, dest)
1005
1007
1006 def heads(self, start=None):
1008 def heads(self, start=None):
1007 heads = self.changelog.heads(start)
1009 heads = self.changelog.heads(start)
1008 # sort the output in rev descending order
1010 # sort the output in rev descending order
1009 heads = [(-self.changelog.rev(h), h) for h in heads]
1011 heads = [(-self.changelog.rev(h), h) for h in heads]
1010 heads.sort()
1012 heads.sort()
1011 return [n for (r, n) in heads]
1013 return [n for (r, n) in heads]
1012
1014
    def branches(self, nodes):
        """For each starting node, follow first parents back to the end
        of its linear run of history.

        Returns a list of (start, end-of-run, parent1, parent2) tuples.
        A run ends at a merge (second parent != nullid) or at a root
        (first parent == nullid).  Defaults to [tip] when nodes is
        empty.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1026
1028
1027 def between(self, pairs):
1029 def between(self, pairs):
1028 r = []
1030 r = []
1029
1031
1030 for top, bottom in pairs:
1032 for top, bottom in pairs:
1031 n, l, i = top, [], 0
1033 n, l, i = top, [], 0
1032 f = 1
1034 f = 1
1033
1035
1034 while n != bottom:
1036 while n != bottom:
1035 p = self.changelog.parents(n)[0]
1037 p = self.changelog.parents(n)[0]
1036 if i == f:
1038 if i == f:
1037 l.append(n)
1039 l.append(n)
1038 f = f * 2
1040 f = f * 2
1039 n = p
1041 n = p
1040 i += 1
1042 i += 1
1041
1043
1042 r.append(l)
1044 r.append(l)
1043
1045
1044 return r
1046 return r
1045
1047
1046 def findincoming(self, remote, base=None, heads=None, force=False):
1048 def findincoming(self, remote, base=None, heads=None, force=False):
1047 """Return list of roots of the subsets of missing nodes from remote
1049 """Return list of roots of the subsets of missing nodes from remote
1048
1050
1049 If base dict is specified, assume that these nodes and their parents
1051 If base dict is specified, assume that these nodes and their parents
1050 exist on the remote side and that no child of a node of base exists
1052 exist on the remote side and that no child of a node of base exists
1051 in both remote and self.
1053 in both remote and self.
1052 Furthermore base will be updated to include the nodes that exists
1054 Furthermore base will be updated to include the nodes that exists
1053 in self and remote but no children exists in self and remote.
1055 in self and remote but no children exists in self and remote.
1054 If a list of heads is specified, return only nodes which are heads
1056 If a list of heads is specified, return only nodes which are heads
1055 or ancestors of these heads.
1057 or ancestors of these heads.
1056
1058
1057 All the ancestors of base are in self and in remote.
1059 All the ancestors of base are in self and in remote.
1058 All the descendants of the list returned are missing in self.
1060 All the descendants of the list returned are missing in self.
1059 (and so we know that the rest of the nodes are missing in remote, see
1061 (and so we know that the rest of the nodes are missing in remote, see
1060 outgoing)
1062 outgoing)
1061 """
1063 """
1062 m = self.changelog.nodemap
1064 m = self.changelog.nodemap
1063 search = []
1065 search = []
1064 fetch = {}
1066 fetch = {}
1065 seen = {}
1067 seen = {}
1066 seenbranch = {}
1068 seenbranch = {}
1067 if base == None:
1069 if base == None:
1068 base = {}
1070 base = {}
1069
1071
1070 if not heads:
1072 if not heads:
1071 heads = remote.heads()
1073 heads = remote.heads()
1072
1074
1073 if self.changelog.tip() == nullid:
1075 if self.changelog.tip() == nullid:
1074 base[nullid] = 1
1076 base[nullid] = 1
1075 if heads != [nullid]:
1077 if heads != [nullid]:
1076 return [nullid]
1078 return [nullid]
1077 return []
1079 return []
1078
1080
1079 # assume we're closer to the tip than the root
1081 # assume we're closer to the tip than the root
1080 # and start by examining the heads
1082 # and start by examining the heads
1081 self.ui.status(_("searching for changes\n"))
1083 self.ui.status(_("searching for changes\n"))
1082
1084
1083 unknown = []
1085 unknown = []
1084 for h in heads:
1086 for h in heads:
1085 if h not in m:
1087 if h not in m:
1086 unknown.append(h)
1088 unknown.append(h)
1087 else:
1089 else:
1088 base[h] = 1
1090 base[h] = 1
1089
1091
1090 if not unknown:
1092 if not unknown:
1091 return []
1093 return []
1092
1094
1093 req = dict.fromkeys(unknown)
1095 req = dict.fromkeys(unknown)
1094 reqcnt = 0
1096 reqcnt = 0
1095
1097
1096 # search through remote branches
1098 # search through remote branches
1097 # a 'branch' here is a linear segment of history, with four parts:
1099 # a 'branch' here is a linear segment of history, with four parts:
1098 # head, root, first parent, second parent
1100 # head, root, first parent, second parent
1099 # (a branch always has two parents (or none) by definition)
1101 # (a branch always has two parents (or none) by definition)
1100 unknown = remote.branches(unknown)
1102 unknown = remote.branches(unknown)
1101 while unknown:
1103 while unknown:
1102 r = []
1104 r = []
1103 while unknown:
1105 while unknown:
1104 n = unknown.pop(0)
1106 n = unknown.pop(0)
1105 if n[0] in seen:
1107 if n[0] in seen:
1106 continue
1108 continue
1107
1109
1108 self.ui.debug(_("examining %s:%s\n")
1110 self.ui.debug(_("examining %s:%s\n")
1109 % (short(n[0]), short(n[1])))
1111 % (short(n[0]), short(n[1])))
1110 if n[0] == nullid: # found the end of the branch
1112 if n[0] == nullid: # found the end of the branch
1111 pass
1113 pass
1112 elif n in seenbranch:
1114 elif n in seenbranch:
1113 self.ui.debug(_("branch already found\n"))
1115 self.ui.debug(_("branch already found\n"))
1114 continue
1116 continue
1115 elif n[1] and n[1] in m: # do we know the base?
1117 elif n[1] and n[1] in m: # do we know the base?
1116 self.ui.debug(_("found incomplete branch %s:%s\n")
1118 self.ui.debug(_("found incomplete branch %s:%s\n")
1117 % (short(n[0]), short(n[1])))
1119 % (short(n[0]), short(n[1])))
1118 search.append(n) # schedule branch range for scanning
1120 search.append(n) # schedule branch range for scanning
1119 seenbranch[n] = 1
1121 seenbranch[n] = 1
1120 else:
1122 else:
1121 if n[1] not in seen and n[1] not in fetch:
1123 if n[1] not in seen and n[1] not in fetch:
1122 if n[2] in m and n[3] in m:
1124 if n[2] in m and n[3] in m:
1123 self.ui.debug(_("found new changeset %s\n") %
1125 self.ui.debug(_("found new changeset %s\n") %
1124 short(n[1]))
1126 short(n[1]))
1125 fetch[n[1]] = 1 # earliest unknown
1127 fetch[n[1]] = 1 # earliest unknown
1126 for p in n[2:4]:
1128 for p in n[2:4]:
1127 if p in m:
1129 if p in m:
1128 base[p] = 1 # latest known
1130 base[p] = 1 # latest known
1129
1131
1130 for p in n[2:4]:
1132 for p in n[2:4]:
1131 if p not in req and p not in m:
1133 if p not in req and p not in m:
1132 r.append(p)
1134 r.append(p)
1133 req[p] = 1
1135 req[p] = 1
1134 seen[n[0]] = 1
1136 seen[n[0]] = 1
1135
1137
1136 if r:
1138 if r:
1137 reqcnt += 1
1139 reqcnt += 1
1138 self.ui.debug(_("request %d: %s\n") %
1140 self.ui.debug(_("request %d: %s\n") %
1139 (reqcnt, " ".join(map(short, r))))
1141 (reqcnt, " ".join(map(short, r))))
1140 for p in xrange(0, len(r), 10):
1142 for p in xrange(0, len(r), 10):
1141 for b in remote.branches(r[p:p+10]):
1143 for b in remote.branches(r[p:p+10]):
1142 self.ui.debug(_("received %s:%s\n") %
1144 self.ui.debug(_("received %s:%s\n") %
1143 (short(b[0]), short(b[1])))
1145 (short(b[0]), short(b[1])))
1144 unknown.append(b)
1146 unknown.append(b)
1145
1147
1146 # do binary search on the branches we found
1148 # do binary search on the branches we found
1147 while search:
1149 while search:
1148 n = search.pop(0)
1150 n = search.pop(0)
1149 reqcnt += 1
1151 reqcnt += 1
1150 l = remote.between([(n[0], n[1])])[0]
1152 l = remote.between([(n[0], n[1])])[0]
1151 l.append(n[1])
1153 l.append(n[1])
1152 p = n[0]
1154 p = n[0]
1153 f = 1
1155 f = 1
1154 for i in l:
1156 for i in l:
1155 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1157 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1156 if i in m:
1158 if i in m:
1157 if f <= 2:
1159 if f <= 2:
1158 self.ui.debug(_("found new branch changeset %s\n") %
1160 self.ui.debug(_("found new branch changeset %s\n") %
1159 short(p))
1161 short(p))
1160 fetch[p] = 1
1162 fetch[p] = 1
1161 base[i] = 1
1163 base[i] = 1
1162 else:
1164 else:
1163 self.ui.debug(_("narrowed branch search to %s:%s\n")
1165 self.ui.debug(_("narrowed branch search to %s:%s\n")
1164 % (short(p), short(i)))
1166 % (short(p), short(i)))
1165 search.append((p, i))
1167 search.append((p, i))
1166 break
1168 break
1167 p, f = i, f * 2
1169 p, f = i, f * 2
1168
1170
1169 # sanity check our fetch list
1171 # sanity check our fetch list
1170 for f in fetch.keys():
1172 for f in fetch.keys():
1171 if f in m:
1173 if f in m:
1172 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1174 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1173
1175
1174 if base.keys() == [nullid]:
1176 if base.keys() == [nullid]:
1175 if force:
1177 if force:
1176 self.ui.warn(_("warning: repository is unrelated\n"))
1178 self.ui.warn(_("warning: repository is unrelated\n"))
1177 else:
1179 else:
1178 raise util.Abort(_("repository is unrelated"))
1180 raise util.Abort(_("repository is unrelated"))
1179
1181
1180 self.ui.debug(_("found new changesets starting at ") +
1182 self.ui.debug(_("found new changesets starting at ") +
1181 " ".join([short(f) for f in fetch]) + "\n")
1183 " ".join([short(f) for f in fetch]) + "\n")
1182
1184
1183 self.ui.debug(_("%d total queries\n") % reqcnt)
1185 self.ui.debug(_("%d total queries\n") % reqcnt)
1184
1186
1185 return fetch.keys()
1187 return fetch.keys()
1186
1188
1187 def findoutgoing(self, remote, base=None, heads=None, force=False):
1189 def findoutgoing(self, remote, base=None, heads=None, force=False):
1188 """Return list of nodes that are roots of subsets not in remote
1190 """Return list of nodes that are roots of subsets not in remote
1189
1191
1190 If base dict is specified, assume that these nodes and their parents
1192 If base dict is specified, assume that these nodes and their parents
1191 exist on the remote side.
1193 exist on the remote side.
1192 If a list of heads is specified, return only nodes which are heads
1194 If a list of heads is specified, return only nodes which are heads
1193 or ancestors of these heads, and return a second element which
1195 or ancestors of these heads, and return a second element which
1194 contains all remote heads which get new children.
1196 contains all remote heads which get new children.
1195 """
1197 """
1196 if base == None:
1198 if base == None:
1197 base = {}
1199 base = {}
1198 self.findincoming(remote, base, heads, force=force)
1200 self.findincoming(remote, base, heads, force=force)
1199
1201
1200 self.ui.debug(_("common changesets up to ")
1202 self.ui.debug(_("common changesets up to ")
1201 + " ".join(map(short, base.keys())) + "\n")
1203 + " ".join(map(short, base.keys())) + "\n")
1202
1204
1203 remain = dict.fromkeys(self.changelog.nodemap)
1205 remain = dict.fromkeys(self.changelog.nodemap)
1204
1206
1205 # prune everything remote has from the tree
1207 # prune everything remote has from the tree
1206 del remain[nullid]
1208 del remain[nullid]
1207 remove = base.keys()
1209 remove = base.keys()
1208 while remove:
1210 while remove:
1209 n = remove.pop(0)
1211 n = remove.pop(0)
1210 if n in remain:
1212 if n in remain:
1211 del remain[n]
1213 del remain[n]
1212 for p in self.changelog.parents(n):
1214 for p in self.changelog.parents(n):
1213 remove.append(p)
1215 remove.append(p)
1214
1216
1215 # find every node whose parents have been pruned
1217 # find every node whose parents have been pruned
1216 subset = []
1218 subset = []
1217 # find every remote head that will get new children
1219 # find every remote head that will get new children
1218 updated_heads = {}
1220 updated_heads = {}
1219 for n in remain:
1221 for n in remain:
1220 p1, p2 = self.changelog.parents(n)
1222 p1, p2 = self.changelog.parents(n)
1221 if p1 not in remain and p2 not in remain:
1223 if p1 not in remain and p2 not in remain:
1222 subset.append(n)
1224 subset.append(n)
1223 if heads:
1225 if heads:
1224 if p1 in heads:
1226 if p1 in heads:
1225 updated_heads[p1] = True
1227 updated_heads[p1] = True
1226 if p2 in heads:
1228 if p2 in heads:
1227 updated_heads[p2] = True
1229 updated_heads[p2] = True
1228
1230
1229 # this is the set of all roots we have to push
1231 # this is the set of all roots we have to push
1230 if heads:
1232 if heads:
1231 return subset, updated_heads.keys()
1233 return subset, updated_heads.keys()
1232 else:
1234 else:
1233 return subset
1235 return subset
1234
1236
1235 def pull(self, remote, heads=None, force=False, lock=None):
1237 def pull(self, remote, heads=None, force=False, lock=None):
1236 mylock = False
1238 mylock = False
1237 if not lock:
1239 if not lock:
1238 lock = self.lock()
1240 lock = self.lock()
1239 mylock = True
1241 mylock = True
1240
1242
1241 try:
1243 try:
1242 fetch = self.findincoming(remote, force=force)
1244 fetch = self.findincoming(remote, force=force)
1243 if fetch == [nullid]:
1245 if fetch == [nullid]:
1244 self.ui.status(_("requesting all changes\n"))
1246 self.ui.status(_("requesting all changes\n"))
1245
1247
1246 if not fetch:
1248 if not fetch:
1247 self.ui.status(_("no changes found\n"))
1249 self.ui.status(_("no changes found\n"))
1248 return 0
1250 return 0
1249
1251
1250 if heads is None:
1252 if heads is None:
1251 cg = remote.changegroup(fetch, 'pull')
1253 cg = remote.changegroup(fetch, 'pull')
1252 else:
1254 else:
1253 if 'changegroupsubset' not in remote.capabilities:
1255 if 'changegroupsubset' not in remote.capabilities:
1254 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1256 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1255 cg = remote.changegroupsubset(fetch, heads, 'pull')
1257 cg = remote.changegroupsubset(fetch, heads, 'pull')
1256 return self.addchangegroup(cg, 'pull', remote.url())
1258 return self.addchangegroup(cg, 'pull', remote.url())
1257 finally:
1259 finally:
1258 if mylock:
1260 if mylock:
1259 lock.release()
1261 lock.release()
1260
1262
1261 def push(self, remote, force=False, revs=None):
1263 def push(self, remote, force=False, revs=None):
1262 # there are two ways to push to remote repo:
1264 # there are two ways to push to remote repo:
1263 #
1265 #
1264 # addchangegroup assumes local user can lock remote
1266 # addchangegroup assumes local user can lock remote
1265 # repo (local filesystem, old ssh servers).
1267 # repo (local filesystem, old ssh servers).
1266 #
1268 #
1267 # unbundle assumes local user cannot lock remote repo (new ssh
1269 # unbundle assumes local user cannot lock remote repo (new ssh
1268 # servers, http servers).
1270 # servers, http servers).
1269
1271
1270 if remote.capable('unbundle'):
1272 if remote.capable('unbundle'):
1271 return self.push_unbundle(remote, force, revs)
1273 return self.push_unbundle(remote, force, revs)
1272 return self.push_addchangegroup(remote, force, revs)
1274 return self.push_addchangegroup(remote, force, revs)
1273
1275
1274 def prepush(self, remote, force, revs):
1276 def prepush(self, remote, force, revs):
1275 base = {}
1277 base = {}
1276 remote_heads = remote.heads()
1278 remote_heads = remote.heads()
1277 inc = self.findincoming(remote, base, remote_heads, force=force)
1279 inc = self.findincoming(remote, base, remote_heads, force=force)
1278
1280
1279 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1281 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1280 if revs is not None:
1282 if revs is not None:
1281 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1283 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1282 else:
1284 else:
1283 bases, heads = update, self.changelog.heads()
1285 bases, heads = update, self.changelog.heads()
1284
1286
1285 if not bases:
1287 if not bases:
1286 self.ui.status(_("no changes found\n"))
1288 self.ui.status(_("no changes found\n"))
1287 return None, 1
1289 return None, 1
1288 elif not force:
1290 elif not force:
1289 # check if we're creating new remote heads
1291 # check if we're creating new remote heads
1290 # to be a remote head after push, node must be either
1292 # to be a remote head after push, node must be either
1291 # - unknown locally
1293 # - unknown locally
1292 # - a local outgoing head descended from update
1294 # - a local outgoing head descended from update
1293 # - a remote head that's known locally and not
1295 # - a remote head that's known locally and not
1294 # ancestral to an outgoing head
1296 # ancestral to an outgoing head
1295
1297
1296 warn = 0
1298 warn = 0
1297
1299
1298 if remote_heads == [nullid]:
1300 if remote_heads == [nullid]:
1299 warn = 0
1301 warn = 0
1300 elif not revs and len(heads) > len(remote_heads):
1302 elif not revs and len(heads) > len(remote_heads):
1301 warn = 1
1303 warn = 1
1302 else:
1304 else:
1303 newheads = list(heads)
1305 newheads = list(heads)
1304 for r in remote_heads:
1306 for r in remote_heads:
1305 if r in self.changelog.nodemap:
1307 if r in self.changelog.nodemap:
1306 desc = self.changelog.heads(r, heads)
1308 desc = self.changelog.heads(r, heads)
1307 l = [h for h in heads if h in desc]
1309 l = [h for h in heads if h in desc]
1308 if not l:
1310 if not l:
1309 newheads.append(r)
1311 newheads.append(r)
1310 else:
1312 else:
1311 newheads.append(r)
1313 newheads.append(r)
1312 if len(newheads) > len(remote_heads):
1314 if len(newheads) > len(remote_heads):
1313 warn = 1
1315 warn = 1
1314
1316
1315 if warn:
1317 if warn:
1316 self.ui.warn(_("abort: push creates new remote branches!\n"))
1318 self.ui.warn(_("abort: push creates new remote branches!\n"))
1317 self.ui.status(_("(did you forget to merge?"
1319 self.ui.status(_("(did you forget to merge?"
1318 " use push -f to force)\n"))
1320 " use push -f to force)\n"))
1319 return None, 1
1321 return None, 1
1320 elif inc:
1322 elif inc:
1321 self.ui.warn(_("note: unsynced remote changes!\n"))
1323 self.ui.warn(_("note: unsynced remote changes!\n"))
1322
1324
1323
1325
1324 if revs is None:
1326 if revs is None:
1325 cg = self.changegroup(update, 'push')
1327 cg = self.changegroup(update, 'push')
1326 else:
1328 else:
1327 cg = self.changegroupsubset(update, revs, 'push')
1329 cg = self.changegroupsubset(update, revs, 'push')
1328 return cg, remote_heads
1330 return cg, remote_heads
1329
1331
1330 def push_addchangegroup(self, remote, force, revs):
1332 def push_addchangegroup(self, remote, force, revs):
1331 lock = remote.lock()
1333 lock = remote.lock()
1332
1334
1333 ret = self.prepush(remote, force, revs)
1335 ret = self.prepush(remote, force, revs)
1334 if ret[0] is not None:
1336 if ret[0] is not None:
1335 cg, remote_heads = ret
1337 cg, remote_heads = ret
1336 return remote.addchangegroup(cg, 'push', self.url())
1338 return remote.addchangegroup(cg, 'push', self.url())
1337 return ret[1]
1339 return ret[1]
1338
1340
1339 def push_unbundle(self, remote, force, revs):
1341 def push_unbundle(self, remote, force, revs):
1340 # local repo finds heads on server, finds out what revs it
1342 # local repo finds heads on server, finds out what revs it
1341 # must push. once revs transferred, if server finds it has
1343 # must push. once revs transferred, if server finds it has
1342 # different heads (someone else won commit/push race), server
1344 # different heads (someone else won commit/push race), server
1343 # aborts.
1345 # aborts.
1344
1346
1345 ret = self.prepush(remote, force, revs)
1347 ret = self.prepush(remote, force, revs)
1346 if ret[0] is not None:
1348 if ret[0] is not None:
1347 cg, remote_heads = ret
1349 cg, remote_heads = ret
1348 if force: remote_heads = ['force']
1350 if force: remote_heads = ['force']
1349 return remote.unbundle(cg, remote_heads, 'push')
1351 return remote.unbundle(cg, remote_heads, 'push')
1350 return ret[1]
1352 return ret[1]
1351
1353
1352 def changegroupinfo(self, nodes):
1354 def changegroupinfo(self, nodes):
1353 self.ui.note(_("%d changesets found\n") % len(nodes))
1355 self.ui.note(_("%d changesets found\n") % len(nodes))
1354 if self.ui.debugflag:
1356 if self.ui.debugflag:
1355 self.ui.debug(_("List of changesets:\n"))
1357 self.ui.debug(_("List of changesets:\n"))
1356 for node in nodes:
1358 for node in nodes:
1357 self.ui.debug("%s\n" % hex(node))
1359 self.ui.debug("%s\n" % hex(node))
1358
1360
1359 def changegroupsubset(self, bases, heads, source):
1361 def changegroupsubset(self, bases, heads, source):
1360 """This function generates a changegroup consisting of all the nodes
1362 """This function generates a changegroup consisting of all the nodes
1361 that are descendents of any of the bases, and ancestors of any of
1363 that are descendents of any of the bases, and ancestors of any of
1362 the heads.
1364 the heads.
1363
1365
1364 It is fairly complex as determining which filenodes and which
1366 It is fairly complex as determining which filenodes and which
1365 manifest nodes need to be included for the changeset to be complete
1367 manifest nodes need to be included for the changeset to be complete
1366 is non-trivial.
1368 is non-trivial.
1367
1369
1368 Another wrinkle is doing the reverse, figuring out which changeset in
1370 Another wrinkle is doing the reverse, figuring out which changeset in
1369 the changegroup a particular filenode or manifestnode belongs to."""
1371 the changegroup a particular filenode or manifestnode belongs to."""
1370
1372
1371 self.hook('preoutgoing', throw=True, source=source)
1373 self.hook('preoutgoing', throw=True, source=source)
1372
1374
1373 # Set up some initial variables
1375 # Set up some initial variables
1374 # Make it easy to refer to self.changelog
1376 # Make it easy to refer to self.changelog
1375 cl = self.changelog
1377 cl = self.changelog
1376 # msng is short for missing - compute the list of changesets in this
1378 # msng is short for missing - compute the list of changesets in this
1377 # changegroup.
1379 # changegroup.
1378 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1380 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1379 self.changegroupinfo(msng_cl_lst)
1381 self.changegroupinfo(msng_cl_lst)
1380 # Some bases may turn out to be superfluous, and some heads may be
1382 # Some bases may turn out to be superfluous, and some heads may be
1381 # too. nodesbetween will return the minimal set of bases and heads
1383 # too. nodesbetween will return the minimal set of bases and heads
1382 # necessary to re-create the changegroup.
1384 # necessary to re-create the changegroup.
1383
1385
1384 # Known heads are the list of heads that it is assumed the recipient
1386 # Known heads are the list of heads that it is assumed the recipient
1385 # of this changegroup will know about.
1387 # of this changegroup will know about.
1386 knownheads = {}
1388 knownheads = {}
1387 # We assume that all parents of bases are known heads.
1389 # We assume that all parents of bases are known heads.
1388 for n in bases:
1390 for n in bases:
1389 for p in cl.parents(n):
1391 for p in cl.parents(n):
1390 if p != nullid:
1392 if p != nullid:
1391 knownheads[p] = 1
1393 knownheads[p] = 1
1392 knownheads = knownheads.keys()
1394 knownheads = knownheads.keys()
1393 if knownheads:
1395 if knownheads:
1394 # Now that we know what heads are known, we can compute which
1396 # Now that we know what heads are known, we can compute which
1395 # changesets are known. The recipient must know about all
1397 # changesets are known. The recipient must know about all
1396 # changesets required to reach the known heads from the null
1398 # changesets required to reach the known heads from the null
1397 # changeset.
1399 # changeset.
1398 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1400 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1399 junk = None
1401 junk = None
1400 # Transform the list into an ersatz set.
1402 # Transform the list into an ersatz set.
1401 has_cl_set = dict.fromkeys(has_cl_set)
1403 has_cl_set = dict.fromkeys(has_cl_set)
1402 else:
1404 else:
1403 # If there were no known heads, the recipient cannot be assumed to
1405 # If there were no known heads, the recipient cannot be assumed to
1404 # know about any changesets.
1406 # know about any changesets.
1405 has_cl_set = {}
1407 has_cl_set = {}
1406
1408
1407 # Make it easy to refer to self.manifest
1409 # Make it easy to refer to self.manifest
1408 mnfst = self.manifest
1410 mnfst = self.manifest
1409 # We don't know which manifests are missing yet
1411 # We don't know which manifests are missing yet
1410 msng_mnfst_set = {}
1412 msng_mnfst_set = {}
1411 # Nor do we know which filenodes are missing.
1413 # Nor do we know which filenodes are missing.
1412 msng_filenode_set = {}
1414 msng_filenode_set = {}
1413
1415
1414 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1416 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1415 junk = None
1417 junk = None
1416
1418
1417 # A changeset always belongs to itself, so the changenode lookup
1419 # A changeset always belongs to itself, so the changenode lookup
1418 # function for a changenode is identity.
1420 # function for a changenode is identity.
1419 def identity(x):
1421 def identity(x):
1420 return x
1422 return x
1421
1423
1422 # A function generating function. Sets up an environment for the
1424 # A function generating function. Sets up an environment for the
1423 # inner function.
1425 # inner function.
1424 def cmp_by_rev_func(revlog):
1426 def cmp_by_rev_func(revlog):
1425 # Compare two nodes by their revision number in the environment's
1427 # Compare two nodes by their revision number in the environment's
1426 # revision history. Since the revision number both represents the
1428 # revision history. Since the revision number both represents the
1427 # most efficient order to read the nodes in, and represents a
1429 # most efficient order to read the nodes in, and represents a
1428 # topological sorting of the nodes, this function is often useful.
1430 # topological sorting of the nodes, this function is often useful.
1429 def cmp_by_rev(a, b):
1431 def cmp_by_rev(a, b):
1430 return cmp(revlog.rev(a), revlog.rev(b))
1432 return cmp(revlog.rev(a), revlog.rev(b))
1431 return cmp_by_rev
1433 return cmp_by_rev
1432
1434
1433 # If we determine that a particular file or manifest node must be a
1435 # If we determine that a particular file or manifest node must be a
1434 # node that the recipient of the changegroup will already have, we can
1436 # node that the recipient of the changegroup will already have, we can
1435 # also assume the recipient will have all the parents. This function
1437 # also assume the recipient will have all the parents. This function
1436 # prunes them from the set of missing nodes.
1438 # prunes them from the set of missing nodes.
1437 def prune_parents(revlog, hasset, msngset):
1439 def prune_parents(revlog, hasset, msngset):
1438 haslst = hasset.keys()
1440 haslst = hasset.keys()
1439 haslst.sort(cmp_by_rev_func(revlog))
1441 haslst.sort(cmp_by_rev_func(revlog))
1440 for node in haslst:
1442 for node in haslst:
1441 parentlst = [p for p in revlog.parents(node) if p != nullid]
1443 parentlst = [p for p in revlog.parents(node) if p != nullid]
1442 while parentlst:
1444 while parentlst:
1443 n = parentlst.pop()
1445 n = parentlst.pop()
1444 if n not in hasset:
1446 if n not in hasset:
1445 hasset[n] = 1
1447 hasset[n] = 1
1446 p = [p for p in revlog.parents(n) if p != nullid]
1448 p = [p for p in revlog.parents(n) if p != nullid]
1447 parentlst.extend(p)
1449 parentlst.extend(p)
1448 for n in hasset:
1450 for n in hasset:
1449 msngset.pop(n, None)
1451 msngset.pop(n, None)
1450
1452
1451 # This is a function generating function used to set up an environment
1453 # This is a function generating function used to set up an environment
1452 # for the inner function to execute in.
1454 # for the inner function to execute in.
1453 def manifest_and_file_collector(changedfileset):
1455 def manifest_and_file_collector(changedfileset):
1454 # This is an information gathering function that gathers
1456 # This is an information gathering function that gathers
1455 # information from each changeset node that goes out as part of
1457 # information from each changeset node that goes out as part of
1456 # the changegroup. The information gathered is a list of which
1458 # the changegroup. The information gathered is a list of which
1457 # manifest nodes are potentially required (the recipient may
1459 # manifest nodes are potentially required (the recipient may
1458 # already have them) and total list of all files which were
1460 # already have them) and total list of all files which were
1459 # changed in any changeset in the changegroup.
1461 # changed in any changeset in the changegroup.
1460 #
1462 #
1461 # We also remember the first changenode we saw any manifest
1463 # We also remember the first changenode we saw any manifest
1462 # referenced by so we can later determine which changenode 'owns'
1464 # referenced by so we can later determine which changenode 'owns'
1463 # the manifest.
1465 # the manifest.
1464 def collect_manifests_and_files(clnode):
1466 def collect_manifests_and_files(clnode):
1465 c = cl.read(clnode)
1467 c = cl.read(clnode)
1466 for f in c[3]:
1468 for f in c[3]:
1467 # This is to make sure we only have one instance of each
1469 # This is to make sure we only have one instance of each
1468 # filename string for each filename.
1470 # filename string for each filename.
1469 changedfileset.setdefault(f, f)
1471 changedfileset.setdefault(f, f)
1470 msng_mnfst_set.setdefault(c[0], clnode)
1472 msng_mnfst_set.setdefault(c[0], clnode)
1471 return collect_manifests_and_files
1473 return collect_manifests_and_files
1472
1474
1473 # Figure out which manifest nodes (of the ones we think might be part
1475 # Figure out which manifest nodes (of the ones we think might be part
1474 # of the changegroup) the recipient must know about and remove them
1476 # of the changegroup) the recipient must know about and remove them
1475 # from the changegroup.
1477 # from the changegroup.
1476 def prune_manifests():
1478 def prune_manifests():
1477 has_mnfst_set = {}
1479 has_mnfst_set = {}
1478 for n in msng_mnfst_set:
1480 for n in msng_mnfst_set:
1479 # If a 'missing' manifest thinks it belongs to a changenode
1481 # If a 'missing' manifest thinks it belongs to a changenode
1480 # the recipient is assumed to have, obviously the recipient
1482 # the recipient is assumed to have, obviously the recipient
1481 # must have that manifest.
1483 # must have that manifest.
1482 linknode = cl.node(mnfst.linkrev(n))
1484 linknode = cl.node(mnfst.linkrev(n))
1483 if linknode in has_cl_set:
1485 if linknode in has_cl_set:
1484 has_mnfst_set[n] = 1
1486 has_mnfst_set[n] = 1
1485 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1487 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1486
1488
1487 # Use the information collected in collect_manifests_and_files to say
1489 # Use the information collected in collect_manifests_and_files to say
1488 # which changenode any manifestnode belongs to.
1490 # which changenode any manifestnode belongs to.
1489 def lookup_manifest_link(mnfstnode):
1491 def lookup_manifest_link(mnfstnode):
1490 return msng_mnfst_set[mnfstnode]
1492 return msng_mnfst_set[mnfstnode]
1491
1493
1492 # A function generating function that sets up the initial environment
1494 # A function generating function that sets up the initial environment
1493 # the inner function.
1495 # the inner function.
1494 def filenode_collector(changedfiles):
1496 def filenode_collector(changedfiles):
1495 next_rev = [0]
1497 next_rev = [0]
1496 # This gathers information from each manifestnode included in the
1498 # This gathers information from each manifestnode included in the
1497 # changegroup about which filenodes the manifest node references
1499 # changegroup about which filenodes the manifest node references
1498 # so we can include those in the changegroup too.
1500 # so we can include those in the changegroup too.
1499 #
1501 #
1500 # It also remembers which changenode each filenode belongs to. It
1502 # It also remembers which changenode each filenode belongs to. It
1501 # does this by assuming the a filenode belongs to the changenode
1503 # does this by assuming the a filenode belongs to the changenode
1502 # the first manifest that references it belongs to.
1504 # the first manifest that references it belongs to.
1503 def collect_msng_filenodes(mnfstnode):
1505 def collect_msng_filenodes(mnfstnode):
1504 r = mnfst.rev(mnfstnode)
1506 r = mnfst.rev(mnfstnode)
1505 if r == next_rev[0]:
1507 if r == next_rev[0]:
1506 # If the last rev we looked at was the one just previous,
1508 # If the last rev we looked at was the one just previous,
1507 # we only need to see a diff.
1509 # we only need to see a diff.
1508 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1510 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1509 # For each line in the delta
1511 # For each line in the delta
1510 for dline in delta.splitlines():
1512 for dline in delta.splitlines():
1511 # get the filename and filenode for that line
1513 # get the filename and filenode for that line
1512 f, fnode = dline.split('\0')
1514 f, fnode = dline.split('\0')
1513 fnode = bin(fnode[:40])
1515 fnode = bin(fnode[:40])
1514 f = changedfiles.get(f, None)
1516 f = changedfiles.get(f, None)
1515 # And if the file is in the list of files we care
1517 # And if the file is in the list of files we care
1516 # about.
1518 # about.
1517 if f is not None:
1519 if f is not None:
1518 # Get the changenode this manifest belongs to
1520 # Get the changenode this manifest belongs to
1519 clnode = msng_mnfst_set[mnfstnode]
1521 clnode = msng_mnfst_set[mnfstnode]
1520 # Create the set of filenodes for the file if
1522 # Create the set of filenodes for the file if
1521 # there isn't one already.
1523 # there isn't one already.
1522 ndset = msng_filenode_set.setdefault(f, {})
1524 ndset = msng_filenode_set.setdefault(f, {})
1523 # And set the filenode's changelog node to the
1525 # And set the filenode's changelog node to the
1524 # manifest's if it hasn't been set already.
1526 # manifest's if it hasn't been set already.
1525 ndset.setdefault(fnode, clnode)
1527 ndset.setdefault(fnode, clnode)
1526 else:
1528 else:
1527 # Otherwise we need a full manifest.
1529 # Otherwise we need a full manifest.
1528 m = mnfst.read(mnfstnode)
1530 m = mnfst.read(mnfstnode)
1529 # For every file in we care about.
1531 # For every file in we care about.
1530 for f in changedfiles:
1532 for f in changedfiles:
1531 fnode = m.get(f, None)
1533 fnode = m.get(f, None)
1532 # If it's in the manifest
1534 # If it's in the manifest
1533 if fnode is not None:
1535 if fnode is not None:
1534 # See comments above.
1536 # See comments above.
1535 clnode = msng_mnfst_set[mnfstnode]
1537 clnode = msng_mnfst_set[mnfstnode]
1536 ndset = msng_filenode_set.setdefault(f, {})
1538 ndset = msng_filenode_set.setdefault(f, {})
1537 ndset.setdefault(fnode, clnode)
1539 ndset.setdefault(fnode, clnode)
1538 # Remember the revision we hope to see next.
1540 # Remember the revision we hope to see next.
1539 next_rev[0] = r + 1
1541 next_rev[0] = r + 1
1540 return collect_msng_filenodes
1542 return collect_msng_filenodes
1541
1543
1542 # We have a list of filenodes we think we need for a file, lets remove
1544 # We have a list of filenodes we think we need for a file, lets remove
1543 # all those we now the recipient must have.
1545 # all those we now the recipient must have.
1544 def prune_filenodes(f, filerevlog):
1546 def prune_filenodes(f, filerevlog):
1545 msngset = msng_filenode_set[f]
1547 msngset = msng_filenode_set[f]
1546 hasset = {}
1548 hasset = {}
1547 # If a 'missing' filenode thinks it belongs to a changenode we
1549 # If a 'missing' filenode thinks it belongs to a changenode we
1548 # assume the recipient must have, then the recipient must have
1550 # assume the recipient must have, then the recipient must have
1549 # that filenode.
1551 # that filenode.
1550 for n in msngset:
1552 for n in msngset:
1551 clnode = cl.node(filerevlog.linkrev(n))
1553 clnode = cl.node(filerevlog.linkrev(n))
1552 if clnode in has_cl_set:
1554 if clnode in has_cl_set:
1553 hasset[n] = 1
1555 hasset[n] = 1
1554 prune_parents(filerevlog, hasset, msngset)
1556 prune_parents(filerevlog, hasset, msngset)
1555
1557
1556 # A function generator function that sets up the a context for the
1558 # A function generator function that sets up the a context for the
1557 # inner function.
1559 # inner function.
1558 def lookup_filenode_link_func(fname):
1560 def lookup_filenode_link_func(fname):
1559 msngset = msng_filenode_set[fname]
1561 msngset = msng_filenode_set[fname]
1560 # Lookup the changenode the filenode belongs to.
1562 # Lookup the changenode the filenode belongs to.
1561 def lookup_filenode_link(fnode):
1563 def lookup_filenode_link(fnode):
1562 return msngset[fnode]
1564 return msngset[fnode]
1563 return lookup_filenode_link
1565 return lookup_filenode_link
1564
1566
1565 # Now that we have all theses utility functions to help out and
1567 # Now that we have all theses utility functions to help out and
1566 # logically divide up the task, generate the group.
1568 # logically divide up the task, generate the group.
1567 def gengroup():
1569 def gengroup():
1568 # The set of changed files starts empty.
1570 # The set of changed files starts empty.
1569 changedfiles = {}
1571 changedfiles = {}
1570 # Create a changenode group generator that will call our functions
1572 # Create a changenode group generator that will call our functions
1571 # back to lookup the owning changenode and collect information.
1573 # back to lookup the owning changenode and collect information.
1572 group = cl.group(msng_cl_lst, identity,
1574 group = cl.group(msng_cl_lst, identity,
1573 manifest_and_file_collector(changedfiles))
1575 manifest_and_file_collector(changedfiles))
1574 for chnk in group:
1576 for chnk in group:
1575 yield chnk
1577 yield chnk
1576
1578
1577 # The list of manifests has been collected by the generator
1579 # The list of manifests has been collected by the generator
1578 # calling our functions back.
1580 # calling our functions back.
1579 prune_manifests()
1581 prune_manifests()
1580 msng_mnfst_lst = msng_mnfst_set.keys()
1582 msng_mnfst_lst = msng_mnfst_set.keys()
1581 # Sort the manifestnodes by revision number.
1583 # Sort the manifestnodes by revision number.
1582 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1584 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1583 # Create a generator for the manifestnodes that calls our lookup
1585 # Create a generator for the manifestnodes that calls our lookup
1584 # and data collection functions back.
1586 # and data collection functions back.
1585 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1587 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1586 filenode_collector(changedfiles))
1588 filenode_collector(changedfiles))
1587 for chnk in group:
1589 for chnk in group:
1588 yield chnk
1590 yield chnk
1589
1591
1590 # These are no longer needed, dereference and toss the memory for
1592 # These are no longer needed, dereference and toss the memory for
1591 # them.
1593 # them.
1592 msng_mnfst_lst = None
1594 msng_mnfst_lst = None
1593 msng_mnfst_set.clear()
1595 msng_mnfst_set.clear()
1594
1596
1595 changedfiles = changedfiles.keys()
1597 changedfiles = changedfiles.keys()
1596 changedfiles.sort()
1598 changedfiles.sort()
1597 # Go through all our files in order sorted by name.
1599 # Go through all our files in order sorted by name.
1598 for fname in changedfiles:
1600 for fname in changedfiles:
1599 filerevlog = self.file(fname)
1601 filerevlog = self.file(fname)
1600 # Toss out the filenodes that the recipient isn't really
1602 # Toss out the filenodes that the recipient isn't really
1601 # missing.
1603 # missing.
1602 if msng_filenode_set.has_key(fname):
1604 if msng_filenode_set.has_key(fname):
1603 prune_filenodes(fname, filerevlog)
1605 prune_filenodes(fname, filerevlog)
1604 msng_filenode_lst = msng_filenode_set[fname].keys()
1606 msng_filenode_lst = msng_filenode_set[fname].keys()
1605 else:
1607 else:
1606 msng_filenode_lst = []
1608 msng_filenode_lst = []
1607 # If any filenodes are left, generate the group for them,
1609 # If any filenodes are left, generate the group for them,
1608 # otherwise don't bother.
1610 # otherwise don't bother.
1609 if len(msng_filenode_lst) > 0:
1611 if len(msng_filenode_lst) > 0:
1610 yield changegroup.genchunk(fname)
1612 yield changegroup.genchunk(fname)
1611 # Sort the filenodes by their revision #
1613 # Sort the filenodes by their revision #
1612 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1614 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1613 # Create a group generator and only pass in a changenode
1615 # Create a group generator and only pass in a changenode
1614 # lookup function as we need to collect no information
1616 # lookup function as we need to collect no information
1615 # from filenodes.
1617 # from filenodes.
1616 group = filerevlog.group(msng_filenode_lst,
1618 group = filerevlog.group(msng_filenode_lst,
1617 lookup_filenode_link_func(fname))
1619 lookup_filenode_link_func(fname))
1618 for chnk in group:
1620 for chnk in group:
1619 yield chnk
1621 yield chnk
1620 if msng_filenode_set.has_key(fname):
1622 if msng_filenode_set.has_key(fname):
1621 # Don't need this anymore, toss it to free memory.
1623 # Don't need this anymore, toss it to free memory.
1622 del msng_filenode_set[fname]
1624 del msng_filenode_set[fname]
1623 # Signal that no more groups are left.
1625 # Signal that no more groups are left.
1624 yield changegroup.closechunk()
1626 yield changegroup.closechunk()
1625
1627
1626 if msng_cl_lst:
1628 if msng_cl_lst:
1627 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1629 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1628
1630
1629 return util.chunkbuffer(gengroup())
1631 return util.chunkbuffer(gengroup())
1630
1632
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes the recipient already has; everything
        descended from them is bundled.
        source: opaque tag passed through to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer wrapping a lazy chunk generator.
        """

        # Give hooks a chance to veto before any work is done.
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All changelog nodes descending from basenodes (the outgoing set).
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Revision numbers of the outgoing changesets, used below to filter
        # manifest/file revisions down to those linked to outgoing csets.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # Changelog nodes link to themselves.
            return x

        def gennodelst(revlog):
            # Yield the nodes of `revlog` whose linked changeset is in the
            # outgoing revision set.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Return a callback that records, into `changedfileset`, every
            # file touched by each changeset as its chunk is generated.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files changed by this changeset.
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Return a lookup mapping a node of `revlog` to the changelog
            # node it is linked to.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changesets come first; collecting the changed-file names is a
            # side effect of generating their chunks.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifests linked to the outgoing changesets.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one group per changed file, in sorted name order.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # The filename chunk precedes the file's delta chunks.
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # An empty chunk terminates the stream.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1697
1699
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream to read chunks from.
        srctype/url: passed through to the changegroup-related hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # An incoming changeset node gets the next revision number:
            # the current count of the (append-file) changelog.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node back to its revision number; used as the
            # link-revision lookup for manifest and file groups.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last revision number before/after the pull.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # Empty chunk: no more file groups in the stream.
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # Always dispose of the append-file, even on error.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # Let hooks veto before the transaction is committed.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook invocation per added changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1804
1806
1805
1807
1806 def stream_in(self, remote):
1808 def stream_in(self, remote):
1807 fp = remote.stream_out()
1809 fp = remote.stream_out()
1808 l = fp.readline()
1810 l = fp.readline()
1809 try:
1811 try:
1810 resp = int(l)
1812 resp = int(l)
1811 except ValueError:
1813 except ValueError:
1812 raise util.UnexpectedOutput(
1814 raise util.UnexpectedOutput(
1813 _('Unexpected response from remote server:'), l)
1815 _('Unexpected response from remote server:'), l)
1814 if resp == 1:
1816 if resp == 1:
1815 raise util.Abort(_('operation forbidden by server'))
1817 raise util.Abort(_('operation forbidden by server'))
1816 elif resp == 2:
1818 elif resp == 2:
1817 raise util.Abort(_('locking the remote repository failed'))
1819 raise util.Abort(_('locking the remote repository failed'))
1818 elif resp != 0:
1820 elif resp != 0:
1819 raise util.Abort(_('the server sent an unknown error code'))
1821 raise util.Abort(_('the server sent an unknown error code'))
1820 self.ui.status(_('streaming all changes\n'))
1822 self.ui.status(_('streaming all changes\n'))
1821 l = fp.readline()
1823 l = fp.readline()
1822 try:
1824 try:
1823 total_files, total_bytes = map(int, l.split(' ', 1))
1825 total_files, total_bytes = map(int, l.split(' ', 1))
1824 except ValueError, TypeError:
1826 except ValueError, TypeError:
1825 raise util.UnexpectedOutput(
1827 raise util.UnexpectedOutput(
1826 _('Unexpected response from remote server:'), l)
1828 _('Unexpected response from remote server:'), l)
1827 self.ui.status(_('%d files to transfer, %s of data\n') %
1829 self.ui.status(_('%d files to transfer, %s of data\n') %
1828 (total_files, util.bytecount(total_bytes)))
1830 (total_files, util.bytecount(total_bytes)))
1829 start = time.time()
1831 start = time.time()
1830 for i in xrange(total_files):
1832 for i in xrange(total_files):
1831 # XXX doesn't support '\n' or '\r' in filenames
1833 # XXX doesn't support '\n' or '\r' in filenames
1832 l = fp.readline()
1834 l = fp.readline()
1833 try:
1835 try:
1834 name, size = l.split('\0', 1)
1836 name, size = l.split('\0', 1)
1835 size = int(size)
1837 size = int(size)
1836 except ValueError, TypeError:
1838 except ValueError, TypeError:
1837 raise util.UnexpectedOutput(
1839 raise util.UnexpectedOutput(
1838 _('Unexpected response from remote server:'), l)
1840 _('Unexpected response from remote server:'), l)
1839 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1841 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1840 ofp = self.sopener(name, 'w')
1842 ofp = self.sopener(name, 'w')
1841 for chunk in util.filechunkiter(fp, limit=size):
1843 for chunk in util.filechunkiter(fp, limit=size):
1842 ofp.write(chunk)
1844 ofp.write(chunk)
1843 ofp.close()
1845 ofp.close()
1844 elapsed = time.time() - start
1846 elapsed = time.time() - start
1845 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1847 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1846 (util.bytecount(total_bytes), elapsed,
1848 (util.bytecount(total_bytes), elapsed,
1847 util.bytecount(total_bytes / elapsed)))
1849 util.bytecount(total_bytes / elapsed)))
1848 self.reload()
1850 self.reload()
1849 return len(self.heads()) + 1
1851 return len(self.heads()) + 1
1850
1852
1851 def clone(self, remote, heads=[], stream=False):
1853 def clone(self, remote, heads=[], stream=False):
1852 '''clone remote repository.
1854 '''clone remote repository.
1853
1855
1854 keyword arguments:
1856 keyword arguments:
1855 heads: list of revs to clone (forces use of pull)
1857 heads: list of revs to clone (forces use of pull)
1856 stream: use streaming clone if possible'''
1858 stream: use streaming clone if possible'''
1857
1859
1858 # now, all clients that can request uncompressed clones can
1860 # now, all clients that can request uncompressed clones can
1859 # read repo formats supported by all servers that can serve
1861 # read repo formats supported by all servers that can serve
1860 # them.
1862 # them.
1861
1863
1862 # if revlog format changes, client will have to check version
1864 # if revlog format changes, client will have to check version
1863 # and format flags on "stream" capability, and use
1865 # and format flags on "stream" capability, and use
1864 # uncompressed only if compatible.
1866 # uncompressed only if compatible.
1865
1867
1866 if stream and not heads and remote.capable('stream'):
1868 if stream and not heads and remote.capable('stream'):
1867 return self.stream_in(remote)
1869 return self.stream_in(remote)
1868 return self.pull(remote, heads)
1870 return self.pull(remote, heads)
1869
1871
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames listed in `files`.

    `files` is an iterable of (src, dest) pairs; a private snapshot of it
    is taken immediately, so later mutation of the caller's list has no
    effect on what gets renamed.  The returned closure captures only that
    snapshot -- deliberately no repository object -- so destructors are
    not kept alive by reference cycles.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return run_renames
1877
1879
def instance(ui, path, create):
    """Open (or create, if `create` is set) the local repository at `path`.

    `path` may carry a leading 'file:' scheme; it is stripped before the
    bare filesystem path is handed to localrepository.
    """
    fs_path = util.drop_scheme('file', path)
    return localrepository(ui, fs_path, create)
1880
1882
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now