##// END OF EJS Templates
commit: catch IOError...
Alexis S. L. Carvalho -
r4060:82eb0faf default
parent child Browse files
Show More
@@ -1,1903 +1,1903 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
    def __del__(self):
        # release any reference to an open transaction on teardown
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, create) the repository at path.

        If path is None, walk up from the cwd until a ".hg" directory
        is found.  Raises repo.RepoError if no repository is found, if
        a stored requirement is unsupported, or if create is set and
        the repository already exists.
        """
        repo.repository.__init__(self)
        if not path:
            # search upward from cwd for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)     # opens files under .hg
        self.wopener = util.opener(self.root)    # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing "requires" file means an old-format repo
                # with no extra requirements
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live in
        # .hg/store with encoded filenames; otherwise directly in .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # compute the revlog version/flags to use for new revlogs
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily populated caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink test: only meaningful if the filesystem supports links
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129
129
130 def url(self):
130 def url(self):
131 return 'file:' + self.root
131 return 'file:' + self.root
132
132
    def hook(self, name, throw=False, **args):
        """Run every [hooks] config entry whose key (before any ".suffix")
        equals name.  "python:mod.func" entries are imported and called
        in-process; anything else runs as a shell command with HG_*
        variables in its environment.  A true return value (or non-zero
        exit) marks failure; with throw set, failure raises util.Abort.
        Returns the combined failure flag."""
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # everything before the last '.' is the module path
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the attribute path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external hook: run as a shell command; hook arguments are
            # exported as HG_<NAME> environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        # run hooks in a stable (sorted-by-key) order
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
213
213
214 tag_disallowed = ':\r\n'
214 tag_disallowed = ':\r\n'
215
215
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject tag names containing any disallowed character (':', CR, LF)
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        # let pretag hooks veto the operation
        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to tag while .hgtags itself has pending changes in any
        # of the first five status lists (presumably modified/added/
        # removed/deleted/unknown -- confirm against status())
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags isn't tracked yet: schedule it for addition
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
259
259
260 def tags(self):
260 def tags(self):
261 '''return a mapping of tag to node'''
261 '''return a mapping of tag to node'''
262 if not self.tagscache:
262 if not self.tagscache:
263 self.tagscache = {}
263 self.tagscache = {}
264
264
265 def parsetag(line, context):
265 def parsetag(line, context):
266 if not line:
266 if not line:
267 return
267 return
268 s = l.split(" ", 1)
268 s = l.split(" ", 1)
269 if len(s) != 2:
269 if len(s) != 2:
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 return
271 return
272 node, key = s
272 node, key = s
273 key = util.tolocal(key.strip()) # stored in UTF-8
273 key = util.tolocal(key.strip()) # stored in UTF-8
274 try:
274 try:
275 bin_n = bin(node)
275 bin_n = bin(node)
276 except TypeError:
276 except TypeError:
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 (context, node))
278 (context, node))
279 return
279 return
280 if bin_n not in self.changelog.nodemap:
280 if bin_n not in self.changelog.nodemap:
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 (context, key))
282 (context, key))
283 return
283 return
284 self.tagscache[key] = bin_n
284 self.tagscache[key] = bin_n
285
285
286 # read the tags file from each head, ending with the tip,
286 # read the tags file from each head, ending with the tip,
287 # and add each tag found to the map, with "newer" ones
287 # and add each tag found to the map, with "newer" ones
288 # taking precedence
288 # taking precedence
289 f = None
289 f = None
290 for rev, node, fnode in self._hgtagsnodes():
290 for rev, node, fnode in self._hgtagsnodes():
291 f = (f and f.filectx(fnode) or
291 f = (f and f.filectx(fnode) or
292 self.filectx('.hgtags', fileid=fnode))
292 self.filectx('.hgtags', fileid=fnode))
293 count = 0
293 count = 0
294 for l in f.data().splitlines():
294 for l in f.data().splitlines():
295 count += 1
295 count += 1
296 parsetag(l, _("%s, line %d") % (str(f), count))
296 parsetag(l, _("%s, line %d") % (str(f), count))
297
297
298 try:
298 try:
299 f = self.opener("localtags")
299 f = self.opener("localtags")
300 count = 0
300 count = 0
301 for l in f:
301 for l in f:
302 # localtags are stored in the local character set
302 # localtags are stored in the local character set
303 # while the internal tag table is stored in UTF-8
303 # while the internal tag table is stored in UTF-8
304 l = util.fromlocal(l)
304 l = util.fromlocal(l)
305 count += 1
305 count += 1
306 parsetag(l, _("localtags, line %d") % count)
306 parsetag(l, _("localtags, line %d") % count)
307 except IOError:
307 except IOError:
308 pass
308 pass
309
309
310 self.tagscache['tip'] = self.changelog.tip()
310 self.tagscache['tip'] = self.changelog.tip()
311
311
312 return self.tagscache
312 return self.tagscache
313
313
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for heads carrying a .hgtags file,
        oldest head first; when several heads share the same .hgtags
        file node, only the latest occurrence is kept so each file
        version is read once."""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags version already queued: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        # filter out the entries nulled above
        return [item for item in ret if item]
331
331
332 def tagslist(self):
332 def tagslist(self):
333 '''return a list of tags ordered by revision'''
333 '''return a list of tags ordered by revision'''
334 l = []
334 l = []
335 for t, n in self.tags().items():
335 for t, n in self.tags().items():
336 try:
336 try:
337 r = self.changelog.rev(n)
337 r = self.changelog.rev(n)
338 except:
338 except:
339 r = -2 # sort to the beginning of the list if unknown
339 r = -2 # sort to the beginning of the list if unknown
340 l.append((r, t, n))
340 l.append((r, t, n))
341 l.sort()
341 l.sort()
342 return [(t, n) for r, t, n in l]
342 return [(t, n) for r, t, n in l]
343
343
344 def nodetags(self, node):
344 def nodetags(self, node):
345 '''return the tags associated with a node'''
345 '''return the tags associated with a node'''
346 if not self.nodetagscache:
346 if not self.nodetagscache:
347 self.nodetagscache = {}
347 self.nodetagscache = {}
348 for t, n in self.tags().items():
348 for t, n in self.tags().items():
349 self.nodetagscache.setdefault(n, []).append(t)
349 self.nodetagscache.setdefault(n, []).append(t)
350 return self.nodetagscache.get(node, [])
350 return self.nodetagscache.get(node, [])
351
351
    def _branchtags(self):
        # load the on-disk branch cache and bring it up to date with any
        # revisions added since it was last written
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
361
361
362 def branchtags(self):
362 def branchtags(self):
363 if self.branchcache is not None:
363 if self.branchcache is not None:
364 return self.branchcache
364 return self.branchcache
365
365
366 self.branchcache = {} # avoid recursion in changectx
366 self.branchcache = {} # avoid recursion in changectx
367 partial = self._branchtags()
367 partial = self._branchtags()
368
368
369 # the branch cache is stored on disk as UTF-8, but in the local
369 # the branch cache is stored on disk as UTF-8, but in the local
370 # charset internally
370 # charset internally
371 for k, v in partial.items():
371 for k, v in partial.items():
372 self.branchcache[util.tolocal(k)] = v
372 self.branchcache[util.tolocal(k)] = v
373 return self.branchcache
373 return self.branchcache
374
374
    def _readbranchcache(self):
        """Read branches.cache from disk.

        Returns (partial, last, lrev): partial maps branch name (UTF-8)
        to node, and last/lrev are the tip node and revision the cache
        was written against.  Any parse error or stale tip invalidates
        the cache and yields ({}, nullid, nullrev)."""
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line is "<tip-hex> <tip-rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines are "<node-hex> <branch-label>"
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any other failure just means the cache is unusable; it
            # will be rebuilt from scratch by the caller
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398
398
    def _writebranchcache(self, branches, tip, tiprev):
        # best-effort write of the branch cache; an unwritable repo
        # (IOError) is deliberately ignored
        try:
            f = self.opener("branches.cache", "w")
            # header: tip node and revision the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
407
407
408 def _updatebranchcache(self, partial, start, end):
408 def _updatebranchcache(self, partial, start, end):
409 for r in xrange(start, end):
409 for r in xrange(start, end):
410 c = self.changectx(r)
410 c = self.changectx(r)
411 b = c.branch()
411 b = c.branch()
412 if b:
412 if b:
413 partial[b] = c.node()
413 partial[b] = c.node()
414
414
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: "." (first dirstate parent), "null", exact
        changelog match, tag name, branch name, then unambiguous node
        prefix.  Raises repo.RepoError when nothing matches."""
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unique node-prefix match
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
433
433
    def dev(self):
        # device number of the .hg directory; lstat so a symlinked
        # path is not followed
        return os.lstat(self.path).st_dev
436
436
    def local(self):
        # always True for localrepository (remote repo classes differ)
        return True
439
439
    def join(self, f):
        """Return the path of f inside the .hg directory."""
        return os.path.join(self.path, f)
442
442
443 def sjoin(self, f):
443 def sjoin(self, f):
444 f = self.encodefn(f)
444 f = self.encodefn(f)
445 return os.path.join(self.spath, f)
445 return os.path.join(self.spath, f)
446
446
    def wjoin(self, f):
        """Return the path of f inside the working directory."""
        return os.path.join(self.root, f)
449
449
450 def file(self, f):
450 def file(self, f):
451 if f[0] == '/':
451 if f[0] == '/':
452 f = f[1:]
452 f = f[1:]
453 return filelog.filelog(self.sopener, f, self.revlogversion)
453 return filelog.filelog(self.sopener, f, self.revlogversion)
454
454
    def changectx(self, changeid=None):
        """Return a context.changectx for changeid."""
        return context.changectx(self, changeid)
457
457
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
460
460
461 def parents(self, changeid=None):
461 def parents(self, changeid=None):
462 '''
462 '''
463 get list of changectxs for parents of changeid or working directory
463 get list of changectxs for parents of changeid or working directory
464 '''
464 '''
465 if changeid is None:
465 if changeid is None:
466 pl = self.dirstate.parents()
466 pl = self.dirstate.parents()
467 else:
467 else:
468 n = self.changelog.lookup(changeid)
468 n = self.changelog.lookup(changeid)
469 pl = self.changelog.parents(n)
469 pl = self.changelog.parents(n)
470 if pl[1] == nullid:
470 if pl[1] == nullid:
471 return [self.changectx(pl[0])]
471 return [self.changectx(pl[0])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
473
473
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
478
478
    def getcwd(self):
        # delegate to the dirstate's notion of the current directory
        return self.dirstate.getcwd()
481
481
    def wfile(self, f, mode='r'):
        """Open file f from the working directory via wopener."""
        return self.wopener(f, mode)
484
484
485 def _filter(self, filter, filename, data):
485 def _filter(self, filter, filename, data):
486 if filter not in self.filterpats:
486 if filter not in self.filterpats:
487 l = []
487 l = []
488 for pat, cmd in self.ui.configitems(filter):
488 for pat, cmd in self.ui.configitems(filter):
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 l.append((mf, cmd))
490 l.append((mf, cmd))
491 self.filterpats[filter] = l
491 self.filterpats[filter] = l
492
492
493 for mf, cmd in self.filterpats[filter]:
493 for mf, cmd in self.filterpats[filter]:
494 if mf(filename):
494 if mf(filename):
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 data = util.filter(data, cmd)
496 data = util.filter(data, cmd)
497 break
497 break
498
498
499 return data
499 return data
500
500
501 def wread(self, filename):
501 def wread(self, filename):
502 if self._link(filename):
502 if self._link(filename):
503 data = os.readlink(self.wjoin(filename))
503 data = os.readlink(self.wjoin(filename))
504 else:
504 else:
505 data = self.wopener(filename, 'r').read()
505 data = self.wopener(filename, 'r').read()
506 return self._filter("encode", filename, data)
506 return self._filter("encode", filename, data)
507
507
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        decode filters; "l" in flags makes a symlink with data as its
        target, "x" sets the executable bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            try:
                # remove any existing file so symlink() can succeed
                os.unlink(self.wjoin(filename))
            except OSError:
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # if a symlink is being replaced by a regular file,
                # unlink it first so we don't write through the link
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
524
524
    def wwritedata(self, filename, data):
        # apply decode filters only; the caller performs the actual write
        return self._filter("decode", filename, data)
527
527
    def transaction(self):
        """Return a new transaction, or a nested one if a transaction is
        already running.

        The current dirstate is saved as journal.dirstate so rollback()
        can restore it; on transaction close the journal files are
        renamed to their undo counterparts."""
        tr = self.transhandle
        if tr != None and tr.running():
            # join the transaction already in progress
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repo)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
547
547
548 def recover(self):
548 def recover(self):
549 l = self.lock()
549 l = self.lock()
550 if os.path.exists(self.sjoin("journal")):
550 if os.path.exists(self.sjoin("journal")):
551 self.ui.status(_("rolling back interrupted transaction\n"))
551 self.ui.status(_("rolling back interrupted transaction\n"))
552 transaction.rollback(self.sopener, self.sjoin("journal"))
552 transaction.rollback(self.sopener, self.sjoin("journal"))
553 self.reload()
553 self.reload()
554 return True
554 return True
555 else:
555 else:
556 self.ui.warn(_("no interrupted transaction available\n"))
556 self.ui.warn(_("no interrupted transaction available\n"))
557 return False
557 return False
558
558
559 def rollback(self, wlock=None):
559 def rollback(self, wlock=None):
560 if not wlock:
560 if not wlock:
561 wlock = self.wlock()
561 wlock = self.wlock()
562 l = self.lock()
562 l = self.lock()
563 if os.path.exists(self.sjoin("undo")):
563 if os.path.exists(self.sjoin("undo")):
564 self.ui.status(_("rolling back last transaction\n"))
564 self.ui.status(_("rolling back last transaction\n"))
565 transaction.rollback(self.sopener, self.sjoin("undo"))
565 transaction.rollback(self.sopener, self.sjoin("undo"))
566 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
566 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
567 self.reload()
567 self.reload()
568 self.wreload()
568 self.wreload()
569 else:
569 else:
570 self.ui.warn(_("no rollback information available\n"))
570 self.ui.warn(_("no rollback information available\n"))
571
571
572 def wreload(self):
572 def wreload(self):
573 self.dirstate.read()
573 self.dirstate.read()
574
574
575 def reload(self):
575 def reload(self):
576 self.changelog.load()
576 self.changelog.load()
577 self.manifest.load()
577 self.manifest.load()
578 self.tagscache = None
578 self.tagscache = None
579 self.nodetagscache = None
579 self.nodetagscache = None
580
580
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname and return the lock object.

        wait:      if true, retry with a timeout (ui.timeout config,
                   default 600s) when the lock is held; otherwise
                   lock.LockHeld propagates immediately
        releasefn: callback invoked when the lock is released
        acquirefn: callback invoked right after the lock is acquired
        desc:      human-readable description used in warning messages
        """
        try:
            # first attempt is always non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
596
596
597 def lock(self, wait=1):
597 def lock(self, wait=1):
598 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
598 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
599 desc=_('repository %s') % self.origroot)
599 desc=_('repository %s') % self.origroot)
600
600
601 def wlock(self, wait=1):
601 def wlock(self, wait=1):
602 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
602 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
603 self.wreload,
603 self.wreload,
604 desc=_('working directory of %s') % self.origroot)
604 desc=_('working directory of %s') % self.origroot)
605
605
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn:          working-directory name of the file
        manifest1:   manifest of the first parent
        manifest2:   manifest of the second parent (empty if not a merge)
        linkrev:     changelog revision the new filelog entry links to
        transaction: active transaction journaling the writes
        changelist:  list that fn is appended to when a new filelog
                     revision is actually created

        Returns the filelog node for the file: an existing node if the
        content is unchanged from the chosen parent, otherwise the node
        of the newly added revision.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
665
665
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit with explicit parents, bypassing dirstate file selection.

        If p1 is not given, both parents default to the current working
        directory parents.  Returns the new changeset node (see commit()).
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
671
671
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node.

        files:        explicit files to commit; default is everything the
                      dirstate reports changed (filtered by match)
        text:         commit message; an editor is launched when empty or
                      when force_editor is set
        p1, p2:       explicit parents — given only by rawcommit(); when
                      p1 is None the dirstate parents are used
        extra:        extra changeset metadata (copied, never mutated)

        Returns None when there is nothing to commit or the message ends
        up empty.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # classify the requested files by dirstate status
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is proper UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: an unreadable file is treated as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines from the message
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
820
820
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but absent from the rev
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: delegate to the dirstate's working-directory walk
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
854
854
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, mf):
            # full content comparison of working file fn against the
            # version recorded in manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort only: without the lock we simply skip
                    # updating the dirstate for files found clean below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # consume mf1 entries so the leftovers are removals
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
953
953
954 def add(self, list, wlock=None):
954 def add(self, list, wlock=None):
955 if not wlock:
955 if not wlock:
956 wlock = self.wlock()
956 wlock = self.wlock()
957 for f in list:
957 for f in list:
958 p = self.wjoin(f)
958 p = self.wjoin(f)
959 islink = os.path.islink(p)
959 islink = os.path.islink(p)
960 if not islink and not os.path.exists(p):
960 if not islink and not os.path.exists(p):
961 self.ui.warn(_("%s does not exist!\n") % f)
961 self.ui.warn(_("%s does not exist!\n") % f)
962 elif not islink and not os.path.isfile(p):
962 elif not islink and not os.path.isfile(p):
963 self.ui.warn(_("%s not added: only files and symlinks "
963 self.ui.warn(_("%s not added: only files and symlinks "
964 "supported currently\n") % f)
964 "supported currently\n") % f)
965 elif self.dirstate.state(f) in 'an':
965 elif self.dirstate.state(f) in 'an':
966 self.ui.warn(_("%s already tracked!\n") % f)
966 self.ui.warn(_("%s already tracked!\n") % f)
967 else:
967 else:
968 self.dirstate.update([f], "a")
968 self.dirstate.update([f], "a")
969
969
970 def forget(self, list, wlock=None):
970 def forget(self, list, wlock=None):
971 if not wlock:
971 if not wlock:
972 wlock = self.wlock()
972 wlock = self.wlock()
973 for f in list:
973 for f in list:
974 if self.dirstate.state(f) not in 'ai':
974 if self.dirstate.state(f) not in 'ai':
975 self.ui.warn(_("%s not added!\n") % f)
975 self.ui.warn(_("%s not added!\n") % f)
976 else:
976 else:
977 self.dirstate.forget([f])
977 self.dirstate.forget([f])
978
978
979 def remove(self, list, unlink=False, wlock=None):
979 def remove(self, list, unlink=False, wlock=None):
980 if unlink:
980 if unlink:
981 for f in list:
981 for f in list:
982 try:
982 try:
983 util.unlink(self.wjoin(f))
983 util.unlink(self.wjoin(f))
984 except OSError, inst:
984 except OSError, inst:
985 if inst.errno != errno.ENOENT:
985 if inst.errno != errno.ENOENT:
986 raise
986 raise
987 if not wlock:
987 if not wlock:
988 wlock = self.wlock()
988 wlock = self.wlock()
989 for f in list:
989 for f in list:
990 p = self.wjoin(f)
990 p = self.wjoin(f)
991 if os.path.exists(p):
991 if os.path.exists(p):
992 self.ui.warn(_("%s still exists!\n") % f)
992 self.ui.warn(_("%s still exists!\n") % f)
993 elif self.dirstate.state(f) == 'a':
993 elif self.dirstate.state(f) == 'a':
994 self.dirstate.forget([f])
994 self.dirstate.forget([f])
995 elif f not in self.dirstate:
995 elif f not in self.dirstate:
996 self.ui.warn(_("%s not tracked!\n") % f)
996 self.ui.warn(_("%s not tracked!\n") % f)
997 else:
997 else:
998 self.dirstate.update([f], "r")
998 self.dirstate.update([f], "r")
999
999
1000 def undelete(self, list, wlock=None):
1000 def undelete(self, list, wlock=None):
1001 p = self.dirstate.parents()[0]
1001 p = self.dirstate.parents()[0]
1002 mn = self.changelog.read(p)[0]
1002 mn = self.changelog.read(p)[0]
1003 m = self.manifest.read(mn)
1003 m = self.manifest.read(mn)
1004 if not wlock:
1004 if not wlock:
1005 wlock = self.wlock()
1005 wlock = self.wlock()
1006 for f in list:
1006 for f in list:
1007 if self.dirstate.state(f) not in "r":
1007 if self.dirstate.state(f) not in "r":
1008 self.ui.warn("%s not removed!\n" % f)
1008 self.ui.warn("%s not removed!\n" % f)
1009 else:
1009 else:
1010 t = self.file(f).read(m[f])
1010 t = self.file(f).read(m[f])
1011 self.wwrite(f, t, m.flags(f))
1011 self.wwrite(f, t, m.flags(f))
1012 self.dirstate.update([f], "n")
1012 self.dirstate.update([f], "n")
1013
1013
1014 def copy(self, source, dest, wlock=None):
1014 def copy(self, source, dest, wlock=None):
1015 p = self.wjoin(dest)
1015 p = self.wjoin(dest)
1016 if not os.path.exists(p):
1016 if not os.path.exists(p):
1017 self.ui.warn(_("%s does not exist!\n") % dest)
1017 self.ui.warn(_("%s does not exist!\n") % dest)
1018 elif not os.path.isfile(p):
1018 elif not os.path.isfile(p):
1019 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1019 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1020 else:
1020 else:
1021 if not wlock:
1021 if not wlock:
1022 wlock = self.wlock()
1022 wlock = self.wlock()
1023 if self.dirstate.state(dest) == '?':
1023 if self.dirstate.state(dest) == '?':
1024 self.dirstate.update([dest], "a")
1024 self.dirstate.update([dest], "a")
1025 self.dirstate.copy(source, dest)
1025 self.dirstate.copy(source, dest)
1026
1026
1027 def heads(self, start=None):
1027 def heads(self, start=None):
1028 heads = self.changelog.heads(start)
1028 heads = self.changelog.heads(start)
1029 # sort the output in rev descending order
1029 # sort the output in rev descending order
1030 heads = [(-self.changelog.rev(h), h) for h in heads]
1030 heads = [(-self.changelog.rev(h), h) for h in heads]
1031 heads.sort()
1031 heads.sort()
1032 return [n for (r, n) in heads]
1032 return [n for (r, n) in heads]
1033
1033
1034 def branches(self, nodes):
1034 def branches(self, nodes):
1035 if not nodes:
1035 if not nodes:
1036 nodes = [self.changelog.tip()]
1036 nodes = [self.changelog.tip()]
1037 b = []
1037 b = []
1038 for n in nodes:
1038 for n in nodes:
1039 t = n
1039 t = n
1040 while 1:
1040 while 1:
1041 p = self.changelog.parents(n)
1041 p = self.changelog.parents(n)
1042 if p[1] != nullid or p[0] == nullid:
1042 if p[1] != nullid or p[0] == nullid:
1043 b.append((t, n, p[0], p[1]))
1043 b.append((t, n, p[0], p[1]))
1044 break
1044 break
1045 n = p[0]
1045 n = p[0]
1046 return b
1046 return b
1047
1047
1048 def between(self, pairs):
1048 def between(self, pairs):
1049 r = []
1049 r = []
1050
1050
1051 for top, bottom in pairs:
1051 for top, bottom in pairs:
1052 n, l, i = top, [], 0
1052 n, l, i = top, [], 0
1053 f = 1
1053 f = 1
1054
1054
1055 while n != bottom:
1055 while n != bottom:
1056 p = self.changelog.parents(n)[0]
1056 p = self.changelog.parents(n)[0]
1057 if i == f:
1057 if i == f:
1058 l.append(n)
1058 l.append(n)
1059 f = f * 2
1059 f = f * 2
1060 n = p
1060 n = p
1061 i += 1
1061 i += 1
1062
1062
1063 r.append(l)
1063 r.append(l)
1064
1064
1065 return r
1065 return r
1066
1066
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        # every remote head is known locally: nothing incoming
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                # queue unknown parents for the next round of requests
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # batch the queries in groups of 10 to bound request size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # pass the full binary node to short(); slicing it first
            # (short(f[:4])) would abbreviate a truncated node
            raise repo.RepoError(_("already have changeset ") + short(f))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1207
1207
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        # no common-node hints given: compute them via findincoming,
        # which fills base as a side effect
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1255
1255
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull missing changesets from remote into this repository.

    Takes (and releases) the local lock unless the caller passed one
    in.  Returns the result of addchangegroup, or 0 when there is
    nothing to pull.
    """
    ownlock = not lock
    if ownlock:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        # a partial pull needs server-side changegroupsubset support
        if heads is not None and 'changegroupsubset' not in remote.capabilities:
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if ownlock:
            lock.release()
1281
1281
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote, picking the right transport.

    Two strategies exist:
    - unbundle: the remote applies the bundle itself (new ssh servers,
      http servers) -- the local user cannot lock the remote repo.
    - addchangegroup: the local user locks the remote repo directly
      (local filesystem, old ssh servers).
    """
    pushfn = self.push_unbundle
    if not remote.capable('unbundle'):
        pushfn = self.push_addchangegroup
    return pushfn(remote, force, revs)
1294
1294
def prepush(self, remote, force, revs):
    """Compute the changegroup to send for a push.

    Returns (changegroup, remote_heads) when there is something to
    push, or (None, status) when there is nothing to push or the push
    was refused because it would create new remote heads.
    """
    base = {}
    remote_heads = remote.heads()
    # findincoming fills base with the common nodes as a side effect;
    # inc is truthy when the remote has changes we lack
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        # restrict the outgoing set to ancestors of the requested revs
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # remote repo is empty: pushing cannot add extra heads
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            # pushing everything and we already have more heads locally
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # no outgoing head descends from this remote
                        # head, so it remains a head after the push
                        newheads.append(r)
                else:
                    # remote head unknown locally: still a head after push
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1350
1350
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and adding the changegroup there.

    Returns addchangegroup's result, or prepush's status code when
    there is nothing to push.
    """
    # hold the remote lock for the duration of the push; it is released
    # when the lock object goes out of scope
    lock = remote.lock()

    cg, tail = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push (or push refused); tail is the status code
        return tail
    return remote.addchangegroup(cg, 'push', self.url())
1359
1359
def push_unbundle(self, remote, force, revs):
    """Push by sending a bundle for the server to apply itself.

    local repo finds heads on server, finds out what revs it must
    push.  once revs transferred, if server finds it has different
    heads (someone else won commit/push race), server aborts.
    """
    cg, tail = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push (or push refused); tail is the status code
        return tail
    # with --force, tell the server to skip its race-detecting head check
    if force:
        heads = ['force']
    else:
        heads = tail
    return remote.unbundle(cg, heads, 'push')
1372
1372
def changegroupinfo(self, nodes):
    """Report the changeset count of a changegroup; list the individual
    changesets when debug output is enabled."""
    ui = self.ui
    ui.note(_("%d changesets found\n") % len(nodes))
    if not ui.debugflag:
        return
    ui.debug(_("List of changesets:\n"))
    for n in nodes:
        ui.debug("%s\n" % hex(n))
1379
1379
1380 def changegroupsubset(self, bases, heads, source):
1380 def changegroupsubset(self, bases, heads, source):
1381 """This function generates a changegroup consisting of all the nodes
1381 """This function generates a changegroup consisting of all the nodes
1382 that are descendents of any of the bases, and ancestors of any of
1382 that are descendents of any of the bases, and ancestors of any of
1383 the heads.
1383 the heads.
1384
1384
1385 It is fairly complex as determining which filenodes and which
1385 It is fairly complex as determining which filenodes and which
1386 manifest nodes need to be included for the changeset to be complete
1386 manifest nodes need to be included for the changeset to be complete
1387 is non-trivial.
1387 is non-trivial.
1388
1388
1389 Another wrinkle is doing the reverse, figuring out which changeset in
1389 Another wrinkle is doing the reverse, figuring out which changeset in
1390 the changegroup a particular filenode or manifestnode belongs to."""
1390 the changegroup a particular filenode or manifestnode belongs to."""
1391
1391
1392 self.hook('preoutgoing', throw=True, source=source)
1392 self.hook('preoutgoing', throw=True, source=source)
1393
1393
1394 # Set up some initial variables
1394 # Set up some initial variables
1395 # Make it easy to refer to self.changelog
1395 # Make it easy to refer to self.changelog
1396 cl = self.changelog
1396 cl = self.changelog
1397 # msng is short for missing - compute the list of changesets in this
1397 # msng is short for missing - compute the list of changesets in this
1398 # changegroup.
1398 # changegroup.
1399 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1399 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1400 self.changegroupinfo(msng_cl_lst)
1400 self.changegroupinfo(msng_cl_lst)
1401 # Some bases may turn out to be superfluous, and some heads may be
1401 # Some bases may turn out to be superfluous, and some heads may be
1402 # too. nodesbetween will return the minimal set of bases and heads
1402 # too. nodesbetween will return the minimal set of bases and heads
1403 # necessary to re-create the changegroup.
1403 # necessary to re-create the changegroup.
1404
1404
1405 # Known heads are the list of heads that it is assumed the recipient
1405 # Known heads are the list of heads that it is assumed the recipient
1406 # of this changegroup will know about.
1406 # of this changegroup will know about.
1407 knownheads = {}
1407 knownheads = {}
1408 # We assume that all parents of bases are known heads.
1408 # We assume that all parents of bases are known heads.
1409 for n in bases:
1409 for n in bases:
1410 for p in cl.parents(n):
1410 for p in cl.parents(n):
1411 if p != nullid:
1411 if p != nullid:
1412 knownheads[p] = 1
1412 knownheads[p] = 1
1413 knownheads = knownheads.keys()
1413 knownheads = knownheads.keys()
1414 if knownheads:
1414 if knownheads:
1415 # Now that we know what heads are known, we can compute which
1415 # Now that we know what heads are known, we can compute which
1416 # changesets are known. The recipient must know about all
1416 # changesets are known. The recipient must know about all
1417 # changesets required to reach the known heads from the null
1417 # changesets required to reach the known heads from the null
1418 # changeset.
1418 # changeset.
1419 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1419 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1420 junk = None
1420 junk = None
1421 # Transform the list into an ersatz set.
1421 # Transform the list into an ersatz set.
1422 has_cl_set = dict.fromkeys(has_cl_set)
1422 has_cl_set = dict.fromkeys(has_cl_set)
1423 else:
1423 else:
1424 # If there were no known heads, the recipient cannot be assumed to
1424 # If there were no known heads, the recipient cannot be assumed to
1425 # know about any changesets.
1425 # know about any changesets.
1426 has_cl_set = {}
1426 has_cl_set = {}
1427
1427
1428 # Make it easy to refer to self.manifest
1428 # Make it easy to refer to self.manifest
1429 mnfst = self.manifest
1429 mnfst = self.manifest
1430 # We don't know which manifests are missing yet
1430 # We don't know which manifests are missing yet
1431 msng_mnfst_set = {}
1431 msng_mnfst_set = {}
1432 # Nor do we know which filenodes are missing.
1432 # Nor do we know which filenodes are missing.
1433 msng_filenode_set = {}
1433 msng_filenode_set = {}
1434
1434
1435 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1435 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1436 junk = None
1436 junk = None
1437
1437
1438 # A changeset always belongs to itself, so the changenode lookup
1438 # A changeset always belongs to itself, so the changenode lookup
1439 # function for a changenode is identity.
1439 # function for a changenode is identity.
1440 def identity(x):
1440 def identity(x):
1441 return x
1441 return x
1442
1442
1443 # A function generating function. Sets up an environment for the
1443 # A function generating function. Sets up an environment for the
1444 # inner function.
1444 # inner function.
1445 def cmp_by_rev_func(revlog):
1445 def cmp_by_rev_func(revlog):
1446 # Compare two nodes by their revision number in the environment's
1446 # Compare two nodes by their revision number in the environment's
1447 # revision history. Since the revision number both represents the
1447 # revision history. Since the revision number both represents the
1448 # most efficient order to read the nodes in, and represents a
1448 # most efficient order to read the nodes in, and represents a
1449 # topological sorting of the nodes, this function is often useful.
1449 # topological sorting of the nodes, this function is often useful.
1450 def cmp_by_rev(a, b):
1450 def cmp_by_rev(a, b):
1451 return cmp(revlog.rev(a), revlog.rev(b))
1451 return cmp(revlog.rev(a), revlog.rev(b))
1452 return cmp_by_rev
1452 return cmp_by_rev
1453
1453
1454 # If we determine that a particular file or manifest node must be a
1454 # If we determine that a particular file or manifest node must be a
1455 # node that the recipient of the changegroup will already have, we can
1455 # node that the recipient of the changegroup will already have, we can
1456 # also assume the recipient will have all the parents. This function
1456 # also assume the recipient will have all the parents. This function
1457 # prunes them from the set of missing nodes.
1457 # prunes them from the set of missing nodes.
1458 def prune_parents(revlog, hasset, msngset):
1458 def prune_parents(revlog, hasset, msngset):
1459 haslst = hasset.keys()
1459 haslst = hasset.keys()
1460 haslst.sort(cmp_by_rev_func(revlog))
1460 haslst.sort(cmp_by_rev_func(revlog))
1461 for node in haslst:
1461 for node in haslst:
1462 parentlst = [p for p in revlog.parents(node) if p != nullid]
1462 parentlst = [p for p in revlog.parents(node) if p != nullid]
1463 while parentlst:
1463 while parentlst:
1464 n = parentlst.pop()
1464 n = parentlst.pop()
1465 if n not in hasset:
1465 if n not in hasset:
1466 hasset[n] = 1
1466 hasset[n] = 1
1467 p = [p for p in revlog.parents(n) if p != nullid]
1467 p = [p for p in revlog.parents(n) if p != nullid]
1468 parentlst.extend(p)
1468 parentlst.extend(p)
1469 for n in hasset:
1469 for n in hasset:
1470 msngset.pop(n, None)
1470 msngset.pop(n, None)
1471
1471
1472 # This is a function generating function used to set up an environment
1472 # This is a function generating function used to set up an environment
1473 # for the inner function to execute in.
1473 # for the inner function to execute in.
1474 def manifest_and_file_collector(changedfileset):
1474 def manifest_and_file_collector(changedfileset):
1475 # This is an information gathering function that gathers
1475 # This is an information gathering function that gathers
1476 # information from each changeset node that goes out as part of
1476 # information from each changeset node that goes out as part of
1477 # the changegroup. The information gathered is a list of which
1477 # the changegroup. The information gathered is a list of which
1478 # manifest nodes are potentially required (the recipient may
1478 # manifest nodes are potentially required (the recipient may
1479 # already have them) and total list of all files which were
1479 # already have them) and total list of all files which were
1480 # changed in any changeset in the changegroup.
1480 # changed in any changeset in the changegroup.
1481 #
1481 #
1482 # We also remember the first changenode we saw any manifest
1482 # We also remember the first changenode we saw any manifest
1483 # referenced by so we can later determine which changenode 'owns'
1483 # referenced by so we can later determine which changenode 'owns'
1484 # the manifest.
1484 # the manifest.
1485 def collect_manifests_and_files(clnode):
1485 def collect_manifests_and_files(clnode):
1486 c = cl.read(clnode)
1486 c = cl.read(clnode)
1487 for f in c[3]:
1487 for f in c[3]:
1488 # This is to make sure we only have one instance of each
1488 # This is to make sure we only have one instance of each
1489 # filename string for each filename.
1489 # filename string for each filename.
1490 changedfileset.setdefault(f, f)
1490 changedfileset.setdefault(f, f)
1491 msng_mnfst_set.setdefault(c[0], clnode)
1491 msng_mnfst_set.setdefault(c[0], clnode)
1492 return collect_manifests_and_files
1492 return collect_manifests_and_files
1493
1493
1494 # Figure out which manifest nodes (of the ones we think might be part
1494 # Figure out which manifest nodes (of the ones we think might be part
1495 # of the changegroup) the recipient must know about and remove them
1495 # of the changegroup) the recipient must know about and remove them
1496 # from the changegroup.
1496 # from the changegroup.
1497 def prune_manifests():
1497 def prune_manifests():
1498 has_mnfst_set = {}
1498 has_mnfst_set = {}
1499 for n in msng_mnfst_set:
1499 for n in msng_mnfst_set:
1500 # If a 'missing' manifest thinks it belongs to a changenode
1500 # If a 'missing' manifest thinks it belongs to a changenode
1501 # the recipient is assumed to have, obviously the recipient
1501 # the recipient is assumed to have, obviously the recipient
1502 # must have that manifest.
1502 # must have that manifest.
1503 linknode = cl.node(mnfst.linkrev(n))
1503 linknode = cl.node(mnfst.linkrev(n))
1504 if linknode in has_cl_set:
1504 if linknode in has_cl_set:
1505 has_mnfst_set[n] = 1
1505 has_mnfst_set[n] = 1
1506 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1506 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1507
1507
1508 # Use the information collected in collect_manifests_and_files to say
1508 # Use the information collected in collect_manifests_and_files to say
1509 # which changenode any manifestnode belongs to.
1509 # which changenode any manifestnode belongs to.
1510 def lookup_manifest_link(mnfstnode):
1510 def lookup_manifest_link(mnfstnode):
1511 return msng_mnfst_set[mnfstnode]
1511 return msng_mnfst_set[mnfstnode]
1512
1512
1513 # A function generating function that sets up the initial environment
1513 # A function generating function that sets up the initial environment
1514 # the inner function.
1514 # the inner function.
1515 def filenode_collector(changedfiles):
1515 def filenode_collector(changedfiles):
1516 next_rev = [0]
1516 next_rev = [0]
1517 # This gathers information from each manifestnode included in the
1517 # This gathers information from each manifestnode included in the
1518 # changegroup about which filenodes the manifest node references
# This gathers information from each manifestnode included in the
# changegroup about which filenodes the manifest node references, so
# those filenodes can be included in the changegroup too.
#
# It also remembers which changenode each filenode belongs to: the
# changenode whose manifest is the first one seen referencing it.
# (Closure helper from changegroupsubset: relies on mnfst, next_rev,
# changedfiles, msng_mnfst_set and msng_filenode_set from the
# enclosing scope.)
def collect_msng_filenodes(mnfstnode):
    rev = mnfst.rev(mnfstnode)
    if rev == next_rev[0]:
        # Manifests arrive in revision order; for a consecutive
        # revision, a delta against the previous text already shows
        # every (file, filenode) pair that changed.
        deltatext = mdiff.patchtext(mnfst.delta(mnfstnode))
        for line in deltatext.splitlines():
            # a manifest line is "<file>\0<40-char hex filenode>..."
            fname, hexnode = line.split('\0')
            filenode = bin(hexnode[:40])
            # changedfiles presumably maps a filename to a canonical
            # copy of itself -- TODO confirm against the collector
            # that fills it
            fname = changedfiles.get(fname, None)
            if fname is None:
                # not a file this changegroup cares about
                continue
            # the changenode this manifest belongs to
            clnode = msng_mnfst_set[mnfstnode]
            ndset = msng_filenode_set.setdefault(fname, {})
            # the first manifest seen referencing the filenode wins
            ndset.setdefault(filenode, clnode)
    else:
        # Out-of-order revision: fall back to a full manifest read.
        m = mnfst.read(mnfstnode)
        for fname in changedfiles:
            filenode = m.get(fname, None)
            # only files present in this manifest matter
            if filenode is not None:
                clnode = msng_mnfst_set[mnfstnode]
                ndset = msng_filenode_set.setdefault(fname, {})
                ndset.setdefault(filenode, clnode)
    # Remember the revision we hope to see next.
    next_rev[0] = rev + 1
1561 return collect_msng_filenodes
1561 return collect_msng_filenodes
1562
1562
# We have a list of filenodes we think we need for a file; remove all
# those the recipient is known to already have.  (Closure helper from
# changegroupsubset: uses cl, has_cl_set, msng_filenode_set and
# prune_parents from the enclosing scope.)
def prune_filenodes(f, filerevlog):
    msngset = msng_filenode_set[f]
    hasset = {}
    # A 'missing' filenode whose introducing changeset the recipient
    # must already have cannot itself be missing.
    for fnode in msngset:
        introducing_cl = cl.node(filerevlog.linkrev(fnode))
        if introducing_cl in has_cl_set:
            hasset[fnode] = 1
    # prune_parents presumably removes hasset entries (and their
    # ancestors) from msngset in place -- see its definition above
    prune_parents(filerevlog, hasset, msngset)
1576
1576
# Factory that pins down the per-file context for the inner lookup
# function.  (Closure helper from changegroupsubset: reads
# msng_filenode_set from the enclosing scope.)
def lookup_filenode_link_func(fname):
    filenode_map = msng_filenode_set[fname]
    def lookup_filenode_link(fnode):
        # the changenode under which this filenode was first collected
        return filenode_map[fnode]
    return lookup_filenode_link
1585
1585
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
# (Closure from changegroupsubset: uses self, cl, mnfst, msng_cl_lst,
# msng_mnfst_set, msng_filenode_set, identity, prune_manifests,
# prune_filenodes, cmp_by_rev_func, lookup_manifest_link,
# manifest_and_file_collector, filenode_collector and
# lookup_filenode_link_func from the enclosing scope.)
def gengroup():
    # The set of changed files starts empty; the changelog collector
    # fills it in as a side effect while the chunks stream out.
    changedfiles = {}
    # Create a changenode group generator that will call our functions
    # back to lookup the owning changenode and collect information.
    group = cl.group(msng_cl_lst, identity,
                     manifest_and_file_collector(changedfiles))
    for chnk in group:
        yield chnk

    # The list of manifests has been collected by the generator
    # calling our functions back.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Sort the manifestnodes by revision number.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # Create a generator for the manifestnodes that calls our lookup
    # and data collection functions back.
    group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                        filenode_collector(changedfiles))
    for chnk in group:
        yield chnk

    # These are no longer needed, dereference and toss the memory for
    # them.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # Go through all our files in order sorted by name.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.  ("fname in dict" instead of the deprecated
        # dict.has_key())
        if fname in msng_filenode_set:
            prune_filenodes(fname, filerevlog)
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # If any filenodes are left, generate the group for them,
        # otherwise don't bother.
        if msng_filenode_lst:
            yield changegroup.genchunk(fname)
            # Sort the filenodes by their revision #
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Create a group generator and only pass in a changenode
            # lookup function as we need to collect no information
            # from filenodes.
            group = filerevlog.group(msng_filenode_lst,
                                     lookup_filenode_link_func(fname))
            for chnk in group:
                yield chnk
        if fname in msng_filenode_set:
            # Don't need this anymore, toss it to free memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield changegroup.closechunk()
1646
1646
1647 if msng_cl_lst:
1647 if msng_cl_lst:
1648 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1648 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1649
1649
1650 return util.chunkbuffer(gengroup())
1650 return util.chunkbuffer(gengroup())
1651
1651
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    Unlike the subset-limited variant above, this can assume the
    recipient already has every changenode we are not sending, which
    keeps the node selection simple: everything from basenodes to our
    heads.
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # All changelog nodes the recipient is missing, plus the set of
    # their revision numbers for quick membership tests below.
    nodes = cl.nodesbetween(basenodes, None)[0]
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        # changelog nodes link to themselves
        return x

    def gennodelst(revlog):
        # Yield the nodes of revlog whose linked changeset is outgoing.
        for rev in xrange(0, revlog.count()):
            node = revlog.node(rev)
            if revlog.linkrev(node) in revset:
                yield node

    def changed_file_collector(changedfileset):
        # Record every file name touched by each changeset we send.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            # c[3]: presumably the changeset's list of touched files
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node of revlog back to the changelog node it belongs to.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # changelog first; the collector fills changedfiles as a side
        # effect while the chunks stream out
        changedfiles = {}
        for chunk in cl.group(nodes, identity,
                              changed_file_collector(changedfiles)):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # then the manifests
        mnfst = self.manifest
        for chunk in mnfst.group(gennodelst(mnfst),
                                 lookuprevlink_func(mnfst)):
            yield chunk

        # finally one group per touched file, in sorted name order
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chunk in filerevlog.group(nodeiter, lookup):
                    yield chunk

        # an empty chunk signals the end of the stream
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1718
1718
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    source: stream of changegroup chunks (changelog group, manifest
    group, then one group per file)
    srctype: tag identifying where the changes come from, passed
    through to the hooks (presumably 'push', 'pull', ... -- confirm
    against callers)
    url: location the changes come from, also passed to the hooks

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # debug-log each incoming changeset; the returned count is the
        # revision number the next added changeset will receive
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        # map a changelog node to its revision number (the linkrev
        # recorded for manifest and file revisions)
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = None
    try:
        cl = appendfile.appendchangelog(self.sopener,
                                        self.changelog.version)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        # cor/cnr: last revision number before/after adding the group
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        # trailing 1 presumably enables uniqueness checking in
        # addgroup -- confirm against revlog.addgroup's signature
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # an empty chunk (empty file name) terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # flush the buffered changelog data into place
        cl.writedata()
    finally:
        # drop the temporary appendfile state whether or not we
        # succeeded
        if cl:
            cl.cleanup()

    # make changelog see real files again
    self.changelog = changelog.changelog(self.sopener,
                                         self.changelog.version)
    self.changelog.checkinlinesize(tr)

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads != oldheads:
        heads = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    if changesets > 0:
        # cor+1 is the first newly added revision; pretxnchangegroup
        # runs before the transaction commits and may abort it
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)), source=srctype,
                  url=url)

    tr.close()

    if changesets > 0:
        # post-transaction hooks: once for the group, then once per
        # incoming changeset
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1825
1825
1826
1826
def stream_in(self, remote):
    """Clone by copying raw store files streamed from `remote`.

    Protocol, as consumed below: the server first sends a status line
    (an integer return code), then "<file count> <total bytes>", then
    for each file a "<name>\\0<size>" header followed by exactly
    `size` bytes of file data.

    Returns len(self.heads()) + 1 (never 0, matching the convention
    documented in addchangegroup).
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        # bug fix: the exceptions must be a tuple -- the old
        # "except ValueError, TypeError" caught only ValueError and
        # bound the instance to the name TypeError
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            # same tuple fix as above
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # clock granularity can yield elapsed == 0 on small clones;
        # avoid a ZeroDivisionError in the rate computation
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.reload()
    return len(self.heads()) + 1
1871
1871
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # bug-prone mutable default argument ([]) replaced by a None
    # sentinel; behavior is unchanged -- pull still receives a list
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1890
1890
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are copied into fresh tuples up front so the closure
    holds no reference back to the caller's structures.
    """
    renamefiles = [tuple(t) for t in files]
    def dorenames():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return dorenames
1898
1898
def instance(ui, path, create):
    """Open (or create) the local repository at path.

    A leading 'file' URL scheme, if present, is stripped first.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1901
1901
def islocal(path):
    """Return True: every repository handled by this module is local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now