##// END OF EJS Templates
Merge branchname changes in localrepo.commit.
Thomas Arendsen Hein -
r4022:bf329bda merge default
parent child Browse files
Show More
@@ -1,1880 +1,1885 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34 self.root = os.path.realpath(path)
34 self.root = os.path.realpath(path)
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 os.mkdir(os.path.join(self.path, "store"))
44 os.mkdir(os.path.join(self.path, "store"))
45 requirements = ("revlogv1", "store")
45 requirements = ("revlogv1", "store")
46 reqfile = self.opener("requires", "w")
46 reqfile = self.opener("requires", "w")
47 for r in requirements:
47 for r in requirements:
48 reqfile.write("%s\n" % r)
48 reqfile.write("%s\n" % r)
49 reqfile.close()
49 reqfile.close()
50 # create an invalid changelog
50 # create an invalid changelog
51 self.opener("00changelog.i", "a").write(
51 self.opener("00changelog.i", "a").write(
52 '\0\0\0\2' # represents revlogv2
52 '\0\0\0\2' # represents revlogv2
53 ' dummy changelog to prevent using the old repo layout'
53 ' dummy changelog to prevent using the old repo layout'
54 )
54 )
55 else:
55 else:
56 raise repo.RepoError(_("repository %s not found") % path)
56 raise repo.RepoError(_("repository %s not found") % path)
57 elif create:
57 elif create:
58 raise repo.RepoError(_("repository %s already exists") % path)
58 raise repo.RepoError(_("repository %s already exists") % path)
59 else:
59 else:
60 # find requirements
60 # find requirements
61 try:
61 try:
62 requirements = self.opener("requires").read().splitlines()
62 requirements = self.opener("requires").read().splitlines()
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66 requirements = []
66 requirements = []
67 # check them
67 # check them
68 for r in requirements:
68 for r in requirements:
69 if r not in self.supported:
69 if r not in self.supported:
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71
71
72 # setup store
72 # setup store
73 if "store" in requirements:
73 if "store" in requirements:
74 self.encodefn = util.encodefilename
74 self.encodefn = util.encodefilename
75 self.decodefn = util.decodefilename
75 self.decodefn = util.decodefilename
76 self.spath = os.path.join(self.path, "store")
76 self.spath = os.path.join(self.path, "store")
77 else:
77 else:
78 self.encodefn = lambda x: x
78 self.encodefn = lambda x: x
79 self.decodefn = lambda x: x
79 self.decodefn = lambda x: x
80 self.spath = self.path
80 self.spath = self.path
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82
82
83 self.ui = ui.ui(parentui=parentui)
83 self.ui = ui.ui(parentui=parentui)
84 try:
84 try:
85 self.ui.readconfig(self.join("hgrc"), self.root)
85 self.ui.readconfig(self.join("hgrc"), self.root)
86 except IOError:
86 except IOError:
87 pass
87 pass
88
88
89 v = self.ui.configrevlog()
89 v = self.ui.configrevlog()
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 fl = v.get('flags', None)
92 fl = v.get('flags', None)
93 flags = 0
93 flags = 0
94 if fl != None:
94 if fl != None:
95 for x in fl.split():
95 for x in fl.split():
96 flags |= revlog.flagstr(x)
96 flags |= revlog.flagstr(x)
97 elif self.revlogv1:
97 elif self.revlogv1:
98 flags = revlog.REVLOG_DEFAULT_FLAGS
98 flags = revlog.REVLOG_DEFAULT_FLAGS
99
99
100 v = self.revlogversion | flags
100 v = self.revlogversion | flags
101 self.manifest = manifest.manifest(self.sopener, v)
101 self.manifest = manifest.manifest(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
103
103
104 fallback = self.ui.config('ui', 'fallbackencoding')
104 fallback = self.ui.config('ui', 'fallbackencoding')
105 if fallback:
105 if fallback:
106 util._fallbackencoding = fallback
106 util._fallbackencoding = fallback
107
107
108 # the changelog might not have the inline index flag
108 # the changelog might not have the inline index flag
109 # on. If the format of the changelog is the same as found in
109 # on. If the format of the changelog is the same as found in
110 # .hgrc, apply any flags found in the .hgrc as well.
110 # .hgrc, apply any flags found in the .hgrc as well.
111 # Otherwise, just version from the changelog
111 # Otherwise, just version from the changelog
112 v = self.changelog.version
112 v = self.changelog.version
113 if v == self.revlogversion:
113 if v == self.revlogversion:
114 v |= flags
114 v |= flags
115 self.revlogversion = v
115 self.revlogversion = v
116
116
117 self.tagscache = None
117 self.tagscache = None
118 self.branchcache = None
118 self.branchcache = None
119 self.nodetagscache = None
119 self.nodetagscache = None
120 self.filterpats = {}
120 self.filterpats = {}
121 self.transhandle = None
121 self.transhandle = None
122
122
123 self._link = lambda x: False
123 self._link = lambda x: False
124 if util.checklink(self.root):
124 if util.checklink(self.root):
125 r = self.root # avoid circular reference in lambda
125 r = self.root # avoid circular reference in lambda
126 self._link = lambda x: util.is_link(os.path.join(r, x))
126 self._link = lambda x: util.is_link(os.path.join(r, x))
127
127
128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129
129
130 def url(self):
130 def url(self):
131 return 'file:' + self.root
131 return 'file:' + self.root
132
132
133 def hook(self, name, throw=False, **args):
133 def hook(self, name, throw=False, **args):
134 def callhook(hname, funcname):
134 def callhook(hname, funcname):
135 '''call python hook. hook is callable object, looked up as
135 '''call python hook. hook is callable object, looked up as
136 name in python module. if callable returns "true", hook
136 name in python module. if callable returns "true", hook
137 fails, else passes. if hook raises exception, treated as
137 fails, else passes. if hook raises exception, treated as
138 hook failure. exception propagates if throw is "true".
138 hook failure. exception propagates if throw is "true".
139
139
140 reason for "true" meaning "hook failed" is so that
140 reason for "true" meaning "hook failed" is so that
141 unmodified commands (e.g. mercurial.commands.update) can
141 unmodified commands (e.g. mercurial.commands.update) can
142 be run as hooks without wrappers to convert return values.'''
142 be run as hooks without wrappers to convert return values.'''
143
143
144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
145 d = funcname.rfind('.')
145 d = funcname.rfind('.')
146 if d == -1:
146 if d == -1:
147 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
147 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
148 % (hname, funcname))
148 % (hname, funcname))
149 modname = funcname[:d]
149 modname = funcname[:d]
150 try:
150 try:
151 obj = __import__(modname)
151 obj = __import__(modname)
152 except ImportError:
152 except ImportError:
153 try:
153 try:
154 # extensions are loaded with hgext_ prefix
154 # extensions are loaded with hgext_ prefix
155 obj = __import__("hgext_%s" % modname)
155 obj = __import__("hgext_%s" % modname)
156 except ImportError:
156 except ImportError:
157 raise util.Abort(_('%s hook is invalid '
157 raise util.Abort(_('%s hook is invalid '
158 '(import of "%s" failed)') %
158 '(import of "%s" failed)') %
159 (hname, modname))
159 (hname, modname))
160 try:
160 try:
161 for p in funcname.split('.')[1:]:
161 for p in funcname.split('.')[1:]:
162 obj = getattr(obj, p)
162 obj = getattr(obj, p)
163 except AttributeError, err:
163 except AttributeError, err:
164 raise util.Abort(_('%s hook is invalid '
164 raise util.Abort(_('%s hook is invalid '
165 '("%s" is not defined)') %
165 '("%s" is not defined)') %
166 (hname, funcname))
166 (hname, funcname))
167 if not callable(obj):
167 if not callable(obj):
168 raise util.Abort(_('%s hook is invalid '
168 raise util.Abort(_('%s hook is invalid '
169 '("%s" is not callable)') %
169 '("%s" is not callable)') %
170 (hname, funcname))
170 (hname, funcname))
171 try:
171 try:
172 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
172 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
173 except (KeyboardInterrupt, util.SignalInterrupt):
173 except (KeyboardInterrupt, util.SignalInterrupt):
174 raise
174 raise
175 except Exception, exc:
175 except Exception, exc:
176 if isinstance(exc, util.Abort):
176 if isinstance(exc, util.Abort):
177 self.ui.warn(_('error: %s hook failed: %s\n') %
177 self.ui.warn(_('error: %s hook failed: %s\n') %
178 (hname, exc.args[0]))
178 (hname, exc.args[0]))
179 else:
179 else:
180 self.ui.warn(_('error: %s hook raised an exception: '
180 self.ui.warn(_('error: %s hook raised an exception: '
181 '%s\n') % (hname, exc))
181 '%s\n') % (hname, exc))
182 if throw:
182 if throw:
183 raise
183 raise
184 self.ui.print_exc()
184 self.ui.print_exc()
185 return True
185 return True
186 if r:
186 if r:
187 if throw:
187 if throw:
188 raise util.Abort(_('%s hook failed') % hname)
188 raise util.Abort(_('%s hook failed') % hname)
189 self.ui.warn(_('warning: %s hook failed\n') % hname)
189 self.ui.warn(_('warning: %s hook failed\n') % hname)
190 return r
190 return r
191
191
192 def runhook(name, cmd):
192 def runhook(name, cmd):
193 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
193 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
194 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
194 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
195 r = util.system(cmd, environ=env, cwd=self.root)
195 r = util.system(cmd, environ=env, cwd=self.root)
196 if r:
196 if r:
197 desc, r = util.explain_exit(r)
197 desc, r = util.explain_exit(r)
198 if throw:
198 if throw:
199 raise util.Abort(_('%s hook %s') % (name, desc))
199 raise util.Abort(_('%s hook %s') % (name, desc))
200 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
200 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
201 return r
201 return r
202
202
203 r = False
203 r = False
204 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
204 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
205 if hname.split(".", 1)[0] == name and cmd]
205 if hname.split(".", 1)[0] == name and cmd]
206 hooks.sort()
206 hooks.sort()
207 for hname, cmd in hooks:
207 for hname, cmd in hooks:
208 if cmd.startswith('python:'):
208 if cmd.startswith('python:'):
209 r = callhook(hname, cmd[7:].strip()) or r
209 r = callhook(hname, cmd[7:].strip()) or r
210 else:
210 else:
211 r = runhook(hname, cmd) or r
211 r = runhook(hname, cmd) or r
212 return r
212 return r
213
213
214 tag_disallowed = ':\r\n'
214 tag_disallowed = ':\r\n'
215
215
216 def tag(self, name, node, message, local, user, date):
216 def tag(self, name, node, message, local, user, date):
217 '''tag a revision with a symbolic name.
217 '''tag a revision with a symbolic name.
218
218
219 if local is True, the tag is stored in a per-repository file.
219 if local is True, the tag is stored in a per-repository file.
220 otherwise, it is stored in the .hgtags file, and a new
220 otherwise, it is stored in the .hgtags file, and a new
221 changeset is committed with the change.
221 changeset is committed with the change.
222
222
223 keyword arguments:
223 keyword arguments:
224
224
225 local: whether to store tag in non-version-controlled file
225 local: whether to store tag in non-version-controlled file
226 (default False)
226 (default False)
227
227
228 message: commit message to use if committing
228 message: commit message to use if committing
229
229
230 user: name of user to use if committing
230 user: name of user to use if committing
231
231
232 date: date tuple to use if committing'''
232 date: date tuple to use if committing'''
233
233
234 for c in self.tag_disallowed:
234 for c in self.tag_disallowed:
235 if c in name:
235 if c in name:
236 raise util.Abort(_('%r cannot be used in a tag name') % c)
236 raise util.Abort(_('%r cannot be used in a tag name') % c)
237
237
238 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
238 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
239
239
240 if local:
240 if local:
241 # local tags are stored in the current charset
241 # local tags are stored in the current charset
242 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
242 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
243 self.hook('tag', node=hex(node), tag=name, local=local)
243 self.hook('tag', node=hex(node), tag=name, local=local)
244 return
244 return
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251 # committed tags are stored in UTF-8
251 # committed tags are stored in UTF-8
252 line = '%s %s\n' % (hex(node), util.fromlocal(name))
252 line = '%s %s\n' % (hex(node), util.fromlocal(name))
253 self.wfile('.hgtags', 'ab').write(line)
253 self.wfile('.hgtags', 'ab').write(line)
254 if self.dirstate.state('.hgtags') == '?':
254 if self.dirstate.state('.hgtags') == '?':
255 self.add(['.hgtags'])
255 self.add(['.hgtags'])
256
256
257 self.commit(['.hgtags'], message, user, date)
257 self.commit(['.hgtags'], message, user, date)
258 self.hook('tag', node=hex(node), tag=name, local=local)
258 self.hook('tag', node=hex(node), tag=name, local=local)
259
259
260 def tags(self):
260 def tags(self):
261 '''return a mapping of tag to node'''
261 '''return a mapping of tag to node'''
262 if not self.tagscache:
262 if not self.tagscache:
263 self.tagscache = {}
263 self.tagscache = {}
264
264
265 def parsetag(line, context):
265 def parsetag(line, context):
266 if not line:
266 if not line:
267 return
267 return
268 s = l.split(" ", 1)
268 s = l.split(" ", 1)
269 if len(s) != 2:
269 if len(s) != 2:
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
270 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 return
271 return
272 node, key = s
272 node, key = s
273 key = util.tolocal(key.strip()) # stored in UTF-8
273 key = util.tolocal(key.strip()) # stored in UTF-8
274 try:
274 try:
275 bin_n = bin(node)
275 bin_n = bin(node)
276 except TypeError:
276 except TypeError:
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 (context, node))
278 (context, node))
279 return
279 return
280 if bin_n not in self.changelog.nodemap:
280 if bin_n not in self.changelog.nodemap:
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 (context, key))
282 (context, key))
283 return
283 return
284 self.tagscache[key] = bin_n
284 self.tagscache[key] = bin_n
285
285
286 # read the tags file from each head, ending with the tip,
286 # read the tags file from each head, ending with the tip,
287 # and add each tag found to the map, with "newer" ones
287 # and add each tag found to the map, with "newer" ones
288 # taking precedence
288 # taking precedence
289 f = None
289 f = None
290 for rev, node, fnode in self._hgtagsnodes():
290 for rev, node, fnode in self._hgtagsnodes():
291 f = (f and f.filectx(fnode) or
291 f = (f and f.filectx(fnode) or
292 self.filectx('.hgtags', fileid=fnode))
292 self.filectx('.hgtags', fileid=fnode))
293 count = 0
293 count = 0
294 for l in f.data().splitlines():
294 for l in f.data().splitlines():
295 count += 1
295 count += 1
296 parsetag(l, _("%s, line %d") % (str(f), count))
296 parsetag(l, _("%s, line %d") % (str(f), count))
297
297
298 try:
298 try:
299 f = self.opener("localtags")
299 f = self.opener("localtags")
300 count = 0
300 count = 0
301 for l in f:
301 for l in f:
302 # localtags are stored in the local character set
302 # localtags are stored in the local character set
303 # while the internal tag table is stored in UTF-8
303 # while the internal tag table is stored in UTF-8
304 l = util.fromlocal(l)
304 l = util.fromlocal(l)
305 count += 1
305 count += 1
306 parsetag(l, _("localtags, line %d") % count)
306 parsetag(l, _("localtags, line %d") % count)
307 except IOError:
307 except IOError:
308 pass
308 pass
309
309
310 self.tagscache['tip'] = self.changelog.tip()
310 self.tagscache['tip'] = self.changelog.tip()
311
311
312 return self.tagscache
312 return self.tagscache
313
313
314 def _hgtagsnodes(self):
314 def _hgtagsnodes(self):
315 heads = self.heads()
315 heads = self.heads()
316 heads.reverse()
316 heads.reverse()
317 last = {}
317 last = {}
318 ret = []
318 ret = []
319 for node in heads:
319 for node in heads:
320 c = self.changectx(node)
320 c = self.changectx(node)
321 rev = c.rev()
321 rev = c.rev()
322 try:
322 try:
323 fnode = c.filenode('.hgtags')
323 fnode = c.filenode('.hgtags')
324 except revlog.LookupError:
324 except revlog.LookupError:
325 continue
325 continue
326 ret.append((rev, node, fnode))
326 ret.append((rev, node, fnode))
327 if fnode in last:
327 if fnode in last:
328 ret[last[fnode]] = None
328 ret[last[fnode]] = None
329 last[fnode] = len(ret) - 1
329 last[fnode] = len(ret) - 1
330 return [item for item in ret if item]
330 return [item for item in ret if item]
331
331
332 def tagslist(self):
332 def tagslist(self):
333 '''return a list of tags ordered by revision'''
333 '''return a list of tags ordered by revision'''
334 l = []
334 l = []
335 for t, n in self.tags().items():
335 for t, n in self.tags().items():
336 try:
336 try:
337 r = self.changelog.rev(n)
337 r = self.changelog.rev(n)
338 except:
338 except:
339 r = -2 # sort to the beginning of the list if unknown
339 r = -2 # sort to the beginning of the list if unknown
340 l.append((r, t, n))
340 l.append((r, t, n))
341 l.sort()
341 l.sort()
342 return [(t, n) for r, t, n in l]
342 return [(t, n) for r, t, n in l]
343
343
344 def nodetags(self, node):
344 def nodetags(self, node):
345 '''return the tags associated with a node'''
345 '''return the tags associated with a node'''
346 if not self.nodetagscache:
346 if not self.nodetagscache:
347 self.nodetagscache = {}
347 self.nodetagscache = {}
348 for t, n in self.tags().items():
348 for t, n in self.tags().items():
349 self.nodetagscache.setdefault(n, []).append(t)
349 self.nodetagscache.setdefault(n, []).append(t)
350 return self.nodetagscache.get(node, [])
350 return self.nodetagscache.get(node, [])
351
351
352 def _branchtags(self):
352 def _branchtags(self):
353 partial, last, lrev = self._readbranchcache()
353 partial, last, lrev = self._readbranchcache()
354
354
355 tiprev = self.changelog.count() - 1
355 tiprev = self.changelog.count() - 1
356 if lrev != tiprev:
356 if lrev != tiprev:
357 self._updatebranchcache(partial, lrev+1, tiprev+1)
357 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 self._writebranchcache(partial, self.changelog.tip(), tiprev)
358 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359
359
360 return partial
360 return partial
361
361
362 def branchtags(self):
362 def branchtags(self):
363 if self.branchcache is not None:
363 if self.branchcache is not None:
364 return self.branchcache
364 return self.branchcache
365
365
366 self.branchcache = {} # avoid recursion in changectx
366 self.branchcache = {} # avoid recursion in changectx
367 partial = self._branchtags()
367 partial = self._branchtags()
368
368
369 # the branch cache is stored on disk as UTF-8, but in the local
369 # the branch cache is stored on disk as UTF-8, but in the local
370 # charset internally
370 # charset internally
371 for k, v in partial.items():
371 for k, v in partial.items():
372 self.branchcache[util.tolocal(k)] = v
372 self.branchcache[util.tolocal(k)] = v
373 return self.branchcache
373 return self.branchcache
374
374
375 def _readbranchcache(self):
375 def _readbranchcache(self):
376 partial = {}
376 partial = {}
377 try:
377 try:
378 f = self.opener("branches.cache")
378 f = self.opener("branches.cache")
379 lines = f.read().split('\n')
379 lines = f.read().split('\n')
380 f.close()
380 f.close()
381 last, lrev = lines.pop(0).rstrip().split(" ", 1)
381 last, lrev = lines.pop(0).rstrip().split(" ", 1)
382 last, lrev = bin(last), int(lrev)
382 last, lrev = bin(last), int(lrev)
383 if not (lrev < self.changelog.count() and
383 if not (lrev < self.changelog.count() and
384 self.changelog.node(lrev) == last): # sanity check
384 self.changelog.node(lrev) == last): # sanity check
385 # invalidate the cache
385 # invalidate the cache
386 raise ValueError('Invalid branch cache: unknown tip')
386 raise ValueError('Invalid branch cache: unknown tip')
387 for l in lines:
387 for l in lines:
388 if not l: continue
388 if not l: continue
389 node, label = l.rstrip().split(" ", 1)
389 node, label = l.rstrip().split(" ", 1)
390 partial[label] = bin(node)
390 partial[label] = bin(node)
391 except (KeyboardInterrupt, util.SignalInterrupt):
391 except (KeyboardInterrupt, util.SignalInterrupt):
392 raise
392 raise
393 except Exception, inst:
393 except Exception, inst:
394 if self.ui.debugflag:
394 if self.ui.debugflag:
395 self.ui.warn(str(inst), '\n')
395 self.ui.warn(str(inst), '\n')
396 partial, last, lrev = {}, nullid, nullrev
396 partial, last, lrev = {}, nullid, nullrev
397 return partial, last, lrev
397 return partial, last, lrev
398
398
399 def _writebranchcache(self, branches, tip, tiprev):
399 def _writebranchcache(self, branches, tip, tiprev):
400 try:
400 try:
401 f = self.opener("branches.cache", "w")
401 f = self.opener("branches.cache", "w")
402 f.write("%s %s\n" % (hex(tip), tiprev))
402 f.write("%s %s\n" % (hex(tip), tiprev))
403 for label, node in branches.iteritems():
403 for label, node in branches.iteritems():
404 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
405 except IOError:
405 except IOError:
406 pass
406 pass
407
407
408 def _updatebranchcache(self, partial, start, end):
408 def _updatebranchcache(self, partial, start, end):
409 for r in xrange(start, end):
409 for r in xrange(start, end):
410 c = self.changectx(r)
410 c = self.changectx(r)
411 b = c.branch()
411 b = c.branch()
412 if b:
412 if b:
413 partial[b] = c.node()
413 partial[b] = c.node()
414
414
415 def lookup(self, key):
415 def lookup(self, key):
416 if key == '.':
416 if key == '.':
417 key = self.dirstate.parents()[0]
417 key = self.dirstate.parents()[0]
418 if key == nullid:
418 if key == nullid:
419 raise repo.RepoError(_("no revision checked out"))
419 raise repo.RepoError(_("no revision checked out"))
420 elif key == 'null':
420 elif key == 'null':
421 return nullid
421 return nullid
422 n = self.changelog._match(key)
422 n = self.changelog._match(key)
423 if n:
423 if n:
424 return n
424 return n
425 if key in self.tags():
425 if key in self.tags():
426 return self.tags()[key]
426 return self.tags()[key]
427 if key in self.branchtags():
427 if key in self.branchtags():
428 return self.branchtags()[key]
428 return self.branchtags()[key]
429 n = self.changelog._partialmatch(key)
429 n = self.changelog._partialmatch(key)
430 if n:
430 if n:
431 return n
431 return n
432 raise repo.RepoError(_("unknown revision '%s'") % key)
432 raise repo.RepoError(_("unknown revision '%s'") % key)
433
433
434 def dev(self):
434 def dev(self):
435 return os.lstat(self.path).st_dev
435 return os.lstat(self.path).st_dev
436
436
437 def local(self):
437 def local(self):
438 return True
438 return True
439
439
440 def join(self, f):
440 def join(self, f):
441 return os.path.join(self.path, f)
441 return os.path.join(self.path, f)
442
442
443 def sjoin(self, f):
443 def sjoin(self, f):
444 f = self.encodefn(f)
444 f = self.encodefn(f)
445 return os.path.join(self.spath, f)
445 return os.path.join(self.spath, f)
446
446
447 def wjoin(self, f):
447 def wjoin(self, f):
448 return os.path.join(self.root, f)
448 return os.path.join(self.root, f)
449
449
450 def file(self, f):
450 def file(self, f):
451 if f[0] == '/':
451 if f[0] == '/':
452 f = f[1:]
452 f = f[1:]
453 return filelog.filelog(self.sopener, f, self.revlogversion)
453 return filelog.filelog(self.sopener, f, self.revlogversion)
454
454
455 def changectx(self, changeid=None):
455 def changectx(self, changeid=None):
456 return context.changectx(self, changeid)
456 return context.changectx(self, changeid)
457
457
458 def workingctx(self):
458 def workingctx(self):
459 return context.workingctx(self)
459 return context.workingctx(self)
460
460
461 def parents(self, changeid=None):
461 def parents(self, changeid=None):
462 '''
462 '''
463 get list of changectxs for parents of changeid or working directory
463 get list of changectxs for parents of changeid or working directory
464 '''
464 '''
465 if changeid is None:
465 if changeid is None:
466 pl = self.dirstate.parents()
466 pl = self.dirstate.parents()
467 else:
467 else:
468 n = self.changelog.lookup(changeid)
468 n = self.changelog.lookup(changeid)
469 pl = self.changelog.parents(n)
469 pl = self.changelog.parents(n)
470 if pl[1] == nullid:
470 if pl[1] == nullid:
471 return [self.changectx(pl[0])]
471 return [self.changectx(pl[0])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
472 return [self.changectx(pl[0]), self.changectx(pl[1])]
473
473
474 def filectx(self, path, changeid=None, fileid=None):
474 def filectx(self, path, changeid=None, fileid=None):
475 """changeid can be a changeset revision, node, or tag.
475 """changeid can be a changeset revision, node, or tag.
476 fileid can be a file revision or node."""
476 fileid can be a file revision or node."""
477 return context.filectx(self, path, changeid, fileid)
477 return context.filectx(self, path, changeid, fileid)
478
478
479 def getcwd(self):
479 def getcwd(self):
480 return self.dirstate.getcwd()
480 return self.dirstate.getcwd()
481
481
482 def wfile(self, f, mode='r'):
482 def wfile(self, f, mode='r'):
483 return self.wopener(f, mode)
483 return self.wopener(f, mode)
484
484
485 def _filter(self, filter, filename, data):
485 def _filter(self, filter, filename, data):
486 if filter not in self.filterpats:
486 if filter not in self.filterpats:
487 l = []
487 l = []
488 for pat, cmd in self.ui.configitems(filter):
488 for pat, cmd in self.ui.configitems(filter):
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 l.append((mf, cmd))
490 l.append((mf, cmd))
491 self.filterpats[filter] = l
491 self.filterpats[filter] = l
492
492
493 for mf, cmd in self.filterpats[filter]:
493 for mf, cmd in self.filterpats[filter]:
494 if mf(filename):
494 if mf(filename):
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 data = util.filter(data, cmd)
496 data = util.filter(data, cmd)
497 break
497 break
498
498
499 return data
499 return data
500
500
501 def wread(self, filename):
501 def wread(self, filename):
502 if self._link(filename):
502 if self._link(filename):
503 data = os.readlink(self.wjoin(filename))
503 data = os.readlink(self.wjoin(filename))
504 else:
504 else:
505 data = self.wopener(filename, 'r').read()
505 data = self.wopener(filename, 'r').read()
506 return self._filter("encode", filename, data)
506 return self._filter("encode", filename, data)
507
507
508 def wwrite(self, filename, data, flags):
508 def wwrite(self, filename, data, flags):
509 data = self._filter("decode", filename, data)
509 data = self._filter("decode", filename, data)
510 if "l" in flags:
510 if "l" in flags:
511 try:
511 try:
512 os.unlink(self.wjoin(filename))
512 os.unlink(self.wjoin(filename))
513 except OSError:
513 except OSError:
514 pass
514 pass
515 os.symlink(data, self.wjoin(filename))
515 os.symlink(data, self.wjoin(filename))
516 else:
516 else:
517 try:
517 try:
518 if self._link(filename):
518 if self._link(filename):
519 os.unlink(self.wjoin(filename))
519 os.unlink(self.wjoin(filename))
520 except OSError:
520 except OSError:
521 pass
521 pass
522 self.wopener(filename, 'w').write(data)
522 self.wopener(filename, 'w').write(data)
523 util.set_exec(self.wjoin(filename), "x" in flags)
523 util.set_exec(self.wjoin(filename), "x" in flags)
524
524
525 def wwritedata(self, filename, data):
525 def wwritedata(self, filename, data):
526 return self._filter("decode", filename, data)
526 return self._filter("decode", filename, data)
527
527
528 def transaction(self):
528 def transaction(self):
529 tr = self.transhandle
529 tr = self.transhandle
530 if tr != None and tr.running():
530 if tr != None and tr.running():
531 return tr.nest()
531 return tr.nest()
532
532
533 # save dirstate for rollback
533 # save dirstate for rollback
534 try:
534 try:
535 ds = self.opener("dirstate").read()
535 ds = self.opener("dirstate").read()
536 except IOError:
536 except IOError:
537 ds = ""
537 ds = ""
538 self.opener("journal.dirstate", "w").write(ds)
538 self.opener("journal.dirstate", "w").write(ds)
539
539
540 renames = [(self.sjoin("journal"), self.sjoin("undo")),
540 renames = [(self.sjoin("journal"), self.sjoin("undo")),
541 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
541 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
542 tr = transaction.transaction(self.ui.warn, self.sopener,
542 tr = transaction.transaction(self.ui.warn, self.sopener,
543 self.sjoin("journal"),
543 self.sjoin("journal"),
544 aftertrans(renames))
544 aftertrans(renames))
545 self.transhandle = tr
545 self.transhandle = tr
546 return tr
546 return tr
547
547
548 def recover(self):
548 def recover(self):
549 l = self.lock()
549 l = self.lock()
550 if os.path.exists(self.sjoin("journal")):
550 if os.path.exists(self.sjoin("journal")):
551 self.ui.status(_("rolling back interrupted transaction\n"))
551 self.ui.status(_("rolling back interrupted transaction\n"))
552 transaction.rollback(self.sopener, self.sjoin("journal"))
552 transaction.rollback(self.sopener, self.sjoin("journal"))
553 self.reload()
553 self.reload()
554 return True
554 return True
555 else:
555 else:
556 self.ui.warn(_("no interrupted transaction available\n"))
556 self.ui.warn(_("no interrupted transaction available\n"))
557 return False
557 return False
558
558
559 def rollback(self, wlock=None):
559 def rollback(self, wlock=None):
560 if not wlock:
560 if not wlock:
561 wlock = self.wlock()
561 wlock = self.wlock()
562 l = self.lock()
562 l = self.lock()
563 if os.path.exists(self.sjoin("undo")):
563 if os.path.exists(self.sjoin("undo")):
564 self.ui.status(_("rolling back last transaction\n"))
564 self.ui.status(_("rolling back last transaction\n"))
565 transaction.rollback(self.sopener, self.sjoin("undo"))
565 transaction.rollback(self.sopener, self.sjoin("undo"))
566 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
566 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
567 self.reload()
567 self.reload()
568 self.wreload()
568 self.wreload()
569 else:
569 else:
570 self.ui.warn(_("no rollback information available\n"))
570 self.ui.warn(_("no rollback information available\n"))
571
571
572 def wreload(self):
572 def wreload(self):
573 self.dirstate.read()
573 self.dirstate.read()
574
574
575 def reload(self):
575 def reload(self):
576 self.changelog.load()
576 self.changelog.load()
577 self.manifest.load()
577 self.manifest.load()
578 self.tagscache = None
578 self.tagscache = None
579 self.nodetagscache = None
579 self.nodetagscache = None
580
580
581 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
581 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
582 desc=None):
582 desc=None):
583 try:
583 try:
584 l = lock.lock(lockname, 0, releasefn, desc=desc)
584 l = lock.lock(lockname, 0, releasefn, desc=desc)
585 except lock.LockHeld, inst:
585 except lock.LockHeld, inst:
586 if not wait:
586 if not wait:
587 raise
587 raise
588 self.ui.warn(_("waiting for lock on %s held by %r\n") %
588 self.ui.warn(_("waiting for lock on %s held by %r\n") %
589 (desc, inst.locker))
589 (desc, inst.locker))
590 # default to 600 seconds timeout
590 # default to 600 seconds timeout
591 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
591 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
592 releasefn, desc=desc)
592 releasefn, desc=desc)
593 if acquirefn:
593 if acquirefn:
594 acquirefn()
594 acquirefn()
595 return l
595 return l
596
596
597 def lock(self, wait=1):
597 def lock(self, wait=1):
598 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
598 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
599 desc=_('repository %s') % self.origroot)
599 desc=_('repository %s') % self.origroot)
600
600
601 def wlock(self, wait=1):
601 def wlock(self, wait=1):
602 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
602 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
603 self.wreload,
603 self.wreload,
604 desc=_('working directory of %s') % self.origroot)
604 desc=_('working directory of %s') % self.origroot)
605
605
606 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
606 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
607 """
607 """
608 commit an individual file as part of a larger transaction
608 commit an individual file as part of a larger transaction
609 """
609 """
610
610
611 t = self.wread(fn)
611 t = self.wread(fn)
612 fl = self.file(fn)
612 fl = self.file(fn)
613 fp1 = manifest1.get(fn, nullid)
613 fp1 = manifest1.get(fn, nullid)
614 fp2 = manifest2.get(fn, nullid)
614 fp2 = manifest2.get(fn, nullid)
615
615
616 meta = {}
616 meta = {}
617 cp = self.dirstate.copied(fn)
617 cp = self.dirstate.copied(fn)
618 if cp:
618 if cp:
619 meta["copy"] = cp
619 meta["copy"] = cp
620 if not manifest2: # not a branch merge
620 if not manifest2: # not a branch merge
621 meta["copyrev"] = hex(manifest1.get(cp, nullid))
621 meta["copyrev"] = hex(manifest1.get(cp, nullid))
622 fp2 = nullid
622 fp2 = nullid
623 elif fp2 != nullid: # copied on remote side
623 elif fp2 != nullid: # copied on remote side
624 meta["copyrev"] = hex(manifest1.get(cp, nullid))
624 meta["copyrev"] = hex(manifest1.get(cp, nullid))
625 elif fp1 != nullid: # copied on local side, reversed
625 elif fp1 != nullid: # copied on local side, reversed
626 meta["copyrev"] = hex(manifest2.get(cp))
626 meta["copyrev"] = hex(manifest2.get(cp))
627 fp2 = nullid
627 fp2 = nullid
628 else: # directory rename
628 else: # directory rename
629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
629 meta["copyrev"] = hex(manifest1.get(cp, nullid))
630 self.ui.debug(_(" %s: copy %s:%s\n") %
630 self.ui.debug(_(" %s: copy %s:%s\n") %
631 (fn, cp, meta["copyrev"]))
631 (fn, cp, meta["copyrev"]))
632 fp1 = nullid
632 fp1 = nullid
633 elif fp2 != nullid:
633 elif fp2 != nullid:
634 # is one parent an ancestor of the other?
634 # is one parent an ancestor of the other?
635 fpa = fl.ancestor(fp1, fp2)
635 fpa = fl.ancestor(fp1, fp2)
636 if fpa == fp1:
636 if fpa == fp1:
637 fp1, fp2 = fp2, nullid
637 fp1, fp2 = fp2, nullid
638 elif fpa == fp2:
638 elif fpa == fp2:
639 fp2 = nullid
639 fp2 = nullid
640
640
641 # is the file unmodified from the parent? report existing entry
641 # is the file unmodified from the parent? report existing entry
642 if fp2 == nullid and not fl.cmp(fp1, t):
642 if fp2 == nullid and not fl.cmp(fp1, t):
643 return fp1
643 return fp1
644
644
645 changelist.append(fn)
645 changelist.append(fn)
646 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
646 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
647
647
648 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
648 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
649 if p1 is None:
649 if p1 is None:
650 p1, p2 = self.dirstate.parents()
650 p1, p2 = self.dirstate.parents()
651 return self.commit(files=files, text=text, user=user, date=date,
651 return self.commit(files=files, text=text, user=user, date=date,
652 p1=p1, p2=p2, wlock=wlock, extra=extra)
652 p1=p1, p2=p2, wlock=wlock, extra=extra)
653
653
654 def commit(self, files=None, text="", user=None, date=None,
654 def commit(self, files=None, text="", user=None, date=None,
655 match=util.always, force=False, lock=None, wlock=None,
655 match=util.always, force=False, lock=None, wlock=None,
656 force_editor=False, p1=None, p2=None, extra={}):
656 force_editor=False, p1=None, p2=None, extra={}):
657
657
658 commit = []
658 commit = []
659 remove = []
659 remove = []
660 changed = []
660 changed = []
661 use_dirstate = (p1 is None) # not rawcommit
661 use_dirstate = (p1 is None) # not rawcommit
662 extra = extra.copy()
662 extra = extra.copy()
663
663
664 if use_dirstate:
664 if use_dirstate:
665 if files:
665 if files:
666 for f in files:
666 for f in files:
667 s = self.dirstate.state(f)
667 s = self.dirstate.state(f)
668 if s in 'nmai':
668 if s in 'nmai':
669 commit.append(f)
669 commit.append(f)
670 elif s == 'r':
670 elif s == 'r':
671 remove.append(f)
671 remove.append(f)
672 else:
672 else:
673 self.ui.warn(_("%s not tracked!\n") % f)
673 self.ui.warn(_("%s not tracked!\n") % f)
674 else:
674 else:
675 changes = self.status(match=match)[:5]
675 changes = self.status(match=match)[:5]
676 modified, added, removed, deleted, unknown = changes
676 modified, added, removed, deleted, unknown = changes
677 commit = modified + added
677 commit = modified + added
678 remove = removed
678 remove = removed
679 else:
679 else:
680 commit = files
680 commit = files
681
681
682 if use_dirstate:
682 if use_dirstate:
683 p1, p2 = self.dirstate.parents()
683 p1, p2 = self.dirstate.parents()
684 update_dirstate = True
684 update_dirstate = True
685 else:
685 else:
686 p1, p2 = p1, p2 or nullid
686 p1, p2 = p1, p2 or nullid
687 update_dirstate = (self.dirstate.parents()[0] == p1)
687 update_dirstate = (self.dirstate.parents()[0] == p1)
688
688
689 c1 = self.changelog.read(p1)
689 c1 = self.changelog.read(p1)
690 c2 = self.changelog.read(p2)
690 c2 = self.changelog.read(p2)
691 m1 = self.manifest.read(c1[0]).copy()
691 m1 = self.manifest.read(c1[0]).copy()
692 m2 = self.manifest.read(c2[0])
692 m2 = self.manifest.read(c2[0])
693
693
694 if use_dirstate:
694 if use_dirstate:
695 branchname = self.workingctx().branch()
695 branchname = self.workingctx().branch()
696 try:
696 try:
697 branchname = branchname.decode('UTF-8').encode('UTF-8')
697 branchname = branchname.decode('UTF-8').encode('UTF-8')
698 except UnicodeDecodeError:
698 except UnicodeDecodeError:
699 raise util.Abort(_('branch name not in UTF-8!'))
699 raise util.Abort(_('branch name not in UTF-8!'))
700 else:
700 else:
701 branchname = ""
701 branchname = ""
702
702
703 if use_dirstate:
703 if use_dirstate:
704 oldname = c1[5].get("branch", "") # stored in UTF-8
704 oldname = c1[5].get("branch", "") # stored in UTF-8
705 if not commit and not remove and not force and p2 == nullid and \
705 if not commit and not remove and not force and p2 == nullid and \
706 branchname == oldname:
706 branchname == oldname:
707 self.ui.status(_("nothing changed\n"))
707 self.ui.status(_("nothing changed\n"))
708 return None
708 return None
709
709
710 xp1 = hex(p1)
710 xp1 = hex(p1)
711 if p2 == nullid: xp2 = ''
711 if p2 == nullid: xp2 = ''
712 else: xp2 = hex(p2)
712 else: xp2 = hex(p2)
713
713
714 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
714 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
715
715
716 if not wlock:
716 if not wlock:
717 wlock = self.wlock()
717 wlock = self.wlock()
718 if not lock:
718 if not lock:
719 lock = self.lock()
719 lock = self.lock()
720 tr = self.transaction()
720 tr = self.transaction()
721
721
722 # check in files
722 # check in files
723 new = {}
723 new = {}
724 linkrev = self.changelog.count()
724 linkrev = self.changelog.count()
725 commit.sort()
725 commit.sort()
726 is_exec = util.execfunc(self.root, m1.execf)
726 is_exec = util.execfunc(self.root, m1.execf)
727 is_link = util.linkfunc(self.root, m1.linkf)
727 is_link = util.linkfunc(self.root, m1.linkf)
728 for f in commit:
728 for f in commit:
729 self.ui.note(f + "\n")
729 self.ui.note(f + "\n")
730 try:
730 try:
731 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
731 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
732 m1.set(f, is_exec(f), is_link(f))
732 m1.set(f, is_exec(f), is_link(f))
733 except OSError:
733 except OSError:
734 if use_dirstate:
734 if use_dirstate:
735 self.ui.warn(_("trouble committing %s!\n") % f)
735 self.ui.warn(_("trouble committing %s!\n") % f)
736 raise
736 raise
737 else:
737 else:
738 remove.append(f)
738 remove.append(f)
739
739
740 # update manifest
740 # update manifest
741 m1.update(new)
741 m1.update(new)
742 remove.sort()
742 remove.sort()
743 removed = []
743 removed = []
744
744
745 for f in remove:
745 for f in remove:
746 if f in m1:
746 if f in m1:
747 del m1[f]
747 del m1[f]
748 removed.append(f)
748 removed.append(f)
749 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
749 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
750
750
751 # add changeset
751 # add changeset
752 new = new.keys()
752 new = new.keys()
753 new.sort()
753 new.sort()
754
754
755 user = user or self.ui.username()
755 user = user or self.ui.username()
756 if not text or force_editor:
756 if not text or force_editor:
757 edittext = []
757 edittext = []
758 if text:
758 if text:
759 edittext.append(text)
759 edittext.append(text)
760 edittext.append("")
760 edittext.append("")
761 edittext.append("HG: user: %s" % user)
761 edittext.append("HG: user: %s" % user)
762 if p2 != nullid:
762 if p2 != nullid:
763 edittext.append("HG: branch merge")
763 edittext.append("HG: branch merge")
764 if branchname:
765 edittext.append("HG: branch %s" % util.tolocal(branchname))
764 edittext.extend(["HG: changed %s" % f for f in changed])
766 edittext.extend(["HG: changed %s" % f for f in changed])
765 edittext.extend(["HG: removed %s" % f for f in removed])
767 edittext.extend(["HG: removed %s" % f for f in removed])
766 if not changed and not remove:
768 if not changed and not remove:
767 edittext.append("HG: no files changed")
769 edittext.append("HG: no files changed")
768 edittext.append("")
770 edittext.append("")
769 # run editor in the repository root
771 # run editor in the repository root
770 olddir = os.getcwd()
772 olddir = os.getcwd()
771 os.chdir(self.root)
773 os.chdir(self.root)
772 text = self.ui.edit("\n".join(edittext), user)
774 text = self.ui.edit("\n".join(edittext), user)
773 os.chdir(olddir)
775 os.chdir(olddir)
774
776
775 lines = [line.rstrip() for line in text.rstrip().splitlines()]
777 lines = [line.rstrip() for line in text.rstrip().splitlines()]
776 while lines and not lines[0]:
778 while lines and not lines[0]:
777 del lines[0]
779 del lines[0]
778 if not lines:
780 if not lines:
779 return None
781 return None
780 text = '\n'.join(lines)
782 text = '\n'.join(lines)
781 if branchname:
783 if branchname:
782 extra["branch"] = branchname
784 extra["branch"] = branchname
783 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
785 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
784 user, date, extra)
786 user, date, extra)
785 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
787 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
786 parent2=xp2)
788 parent2=xp2)
787 tr.close()
789 tr.close()
788
790
791 if self.branchcache and "branch" in extra:
792 self.branchcache[util.tolocal(extra["branch"])] = n
793
789 if use_dirstate or update_dirstate:
794 if use_dirstate or update_dirstate:
790 self.dirstate.setparents(n)
795 self.dirstate.setparents(n)
791 if use_dirstate:
796 if use_dirstate:
792 self.dirstate.update(new, "n")
797 self.dirstate.update(new, "n")
793 self.dirstate.forget(removed)
798 self.dirstate.forget(removed)
794
799
795 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
800 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
796 return n
801 return n
797
802
798 def walk(self, node=None, files=[], match=util.always, badmatch=None):
803 def walk(self, node=None, files=[], match=util.always, badmatch=None):
799 '''
804 '''
800 walk recursively through the directory tree or a given
805 walk recursively through the directory tree or a given
801 changeset, finding all files matched by the match
806 changeset, finding all files matched by the match
802 function
807 function
803
808
804 results are yielded in a tuple (src, filename), where src
809 results are yielded in a tuple (src, filename), where src
805 is one of:
810 is one of:
806 'f' the file was found in the directory tree
811 'f' the file was found in the directory tree
807 'm' the file was only in the dirstate and not in the tree
812 'm' the file was only in the dirstate and not in the tree
808 'b' file was not found and matched badmatch
813 'b' file was not found and matched badmatch
809 '''
814 '''
810
815
811 if node:
816 if node:
812 fdict = dict.fromkeys(files)
817 fdict = dict.fromkeys(files)
813 for fn in self.manifest.read(self.changelog.read(node)[0]):
818 for fn in self.manifest.read(self.changelog.read(node)[0]):
814 for ffn in fdict:
819 for ffn in fdict:
815 # match if the file is the exact name or a directory
820 # match if the file is the exact name or a directory
816 if ffn == fn or fn.startswith("%s/" % ffn):
821 if ffn == fn or fn.startswith("%s/" % ffn):
817 del fdict[ffn]
822 del fdict[ffn]
818 break
823 break
819 if match(fn):
824 if match(fn):
820 yield 'm', fn
825 yield 'm', fn
821 for fn in fdict:
826 for fn in fdict:
822 if badmatch and badmatch(fn):
827 if badmatch and badmatch(fn):
823 if match(fn):
828 if match(fn):
824 yield 'b', fn
829 yield 'b', fn
825 else:
830 else:
826 self.ui.warn(_('%s: No such file in rev %s\n') % (
831 self.ui.warn(_('%s: No such file in rev %s\n') % (
827 util.pathto(self.getcwd(), fn), short(node)))
832 util.pathto(self.getcwd(), fn), short(node)))
828 else:
833 else:
829 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
834 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
830 yield src, fn
835 yield src, fn
831
836
832 def status(self, node1=None, node2=None, files=[], match=util.always,
837 def status(self, node1=None, node2=None, files=[], match=util.always,
833 wlock=None, list_ignored=False, list_clean=False):
838 wlock=None, list_ignored=False, list_clean=False):
834 """return status of files between two nodes or node and working directory
839 """return status of files between two nodes or node and working directory
835
840
836 If node1 is None, use the first dirstate parent instead.
841 If node1 is None, use the first dirstate parent instead.
837 If node2 is None, compare node1 with working directory.
842 If node2 is None, compare node1 with working directory.
838 """
843 """
839
844
840 def fcmp(fn, mf):
845 def fcmp(fn, mf):
841 t1 = self.wread(fn)
846 t1 = self.wread(fn)
842 return self.file(fn).cmp(mf.get(fn, nullid), t1)
847 return self.file(fn).cmp(mf.get(fn, nullid), t1)
843
848
844 def mfmatches(node):
849 def mfmatches(node):
845 change = self.changelog.read(node)
850 change = self.changelog.read(node)
846 mf = self.manifest.read(change[0]).copy()
851 mf = self.manifest.read(change[0]).copy()
847 for fn in mf.keys():
852 for fn in mf.keys():
848 if not match(fn):
853 if not match(fn):
849 del mf[fn]
854 del mf[fn]
850 return mf
855 return mf
851
856
852 modified, added, removed, deleted, unknown = [], [], [], [], []
857 modified, added, removed, deleted, unknown = [], [], [], [], []
853 ignored, clean = [], []
858 ignored, clean = [], []
854
859
855 compareworking = False
860 compareworking = False
856 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
861 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
857 compareworking = True
862 compareworking = True
858
863
859 if not compareworking:
864 if not compareworking:
860 # read the manifest from node1 before the manifest from node2,
865 # read the manifest from node1 before the manifest from node2,
861 # so that we'll hit the manifest cache if we're going through
866 # so that we'll hit the manifest cache if we're going through
862 # all the revisions in parent->child order.
867 # all the revisions in parent->child order.
863 mf1 = mfmatches(node1)
868 mf1 = mfmatches(node1)
864
869
865 # are we comparing the working directory?
870 # are we comparing the working directory?
866 if not node2:
871 if not node2:
867 if not wlock:
872 if not wlock:
868 try:
873 try:
869 wlock = self.wlock(wait=0)
874 wlock = self.wlock(wait=0)
870 except lock.LockException:
875 except lock.LockException:
871 wlock = None
876 wlock = None
872 (lookup, modified, added, removed, deleted, unknown,
877 (lookup, modified, added, removed, deleted, unknown,
873 ignored, clean) = self.dirstate.status(files, match,
878 ignored, clean) = self.dirstate.status(files, match,
874 list_ignored, list_clean)
879 list_ignored, list_clean)
875
880
876 # are we comparing working dir against its parent?
881 # are we comparing working dir against its parent?
877 if compareworking:
882 if compareworking:
878 if lookup:
883 if lookup:
879 # do a full compare of any files that might have changed
884 # do a full compare of any files that might have changed
880 mf2 = mfmatches(self.dirstate.parents()[0])
885 mf2 = mfmatches(self.dirstate.parents()[0])
881 for f in lookup:
886 for f in lookup:
882 if fcmp(f, mf2):
887 if fcmp(f, mf2):
883 modified.append(f)
888 modified.append(f)
884 else:
889 else:
885 clean.append(f)
890 clean.append(f)
886 if wlock is not None:
891 if wlock is not None:
887 self.dirstate.update([f], "n")
892 self.dirstate.update([f], "n")
888 else:
893 else:
889 # we are comparing working dir against non-parent
894 # we are comparing working dir against non-parent
890 # generate a pseudo-manifest for the working dir
895 # generate a pseudo-manifest for the working dir
891 # XXX: create it in dirstate.py ?
896 # XXX: create it in dirstate.py ?
892 mf2 = mfmatches(self.dirstate.parents()[0])
897 mf2 = mfmatches(self.dirstate.parents()[0])
893 is_exec = util.execfunc(self.root, mf2.execf)
898 is_exec = util.execfunc(self.root, mf2.execf)
894 is_link = util.linkfunc(self.root, mf2.linkf)
899 is_link = util.linkfunc(self.root, mf2.linkf)
895 for f in lookup + modified + added:
900 for f in lookup + modified + added:
896 mf2[f] = ""
901 mf2[f] = ""
897 mf2.set(f, is_exec(f), is_link(f))
902 mf2.set(f, is_exec(f), is_link(f))
898 for f in removed:
903 for f in removed:
899 if f in mf2:
904 if f in mf2:
900 del mf2[f]
905 del mf2[f]
901 else:
906 else:
902 # we are comparing two revisions
907 # we are comparing two revisions
903 mf2 = mfmatches(node2)
908 mf2 = mfmatches(node2)
904
909
905 if not compareworking:
910 if not compareworking:
906 # flush lists from dirstate before comparing manifests
911 # flush lists from dirstate before comparing manifests
907 modified, added, clean = [], [], []
912 modified, added, clean = [], [], []
908
913
909 # make sure to sort the files so we talk to the disk in a
914 # make sure to sort the files so we talk to the disk in a
910 # reasonable order
915 # reasonable order
911 mf2keys = mf2.keys()
916 mf2keys = mf2.keys()
912 mf2keys.sort()
917 mf2keys.sort()
913 for fn in mf2keys:
918 for fn in mf2keys:
914 if mf1.has_key(fn):
919 if mf1.has_key(fn):
915 if mf1.flags(fn) != mf2.flags(fn) or \
920 if mf1.flags(fn) != mf2.flags(fn) or \
916 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
921 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
917 modified.append(fn)
922 modified.append(fn)
918 elif list_clean:
923 elif list_clean:
919 clean.append(fn)
924 clean.append(fn)
920 del mf1[fn]
925 del mf1[fn]
921 else:
926 else:
922 added.append(fn)
927 added.append(fn)
923
928
924 removed = mf1.keys()
929 removed = mf1.keys()
925
930
926 # sort and return results:
931 # sort and return results:
927 for l in modified, added, removed, deleted, unknown, ignored, clean:
932 for l in modified, added, removed, deleted, unknown, ignored, clean:
928 l.sort()
933 l.sort()
929 return (modified, added, removed, deleted, unknown, ignored, clean)
934 return (modified, added, removed, deleted, unknown, ignored, clean)
930
935
931 def add(self, list, wlock=None):
936 def add(self, list, wlock=None):
932 if not wlock:
937 if not wlock:
933 wlock = self.wlock()
938 wlock = self.wlock()
934 for f in list:
939 for f in list:
935 p = self.wjoin(f)
940 p = self.wjoin(f)
936 islink = os.path.islink(p)
941 islink = os.path.islink(p)
937 if not islink and not os.path.exists(p):
942 if not islink and not os.path.exists(p):
938 self.ui.warn(_("%s does not exist!\n") % f)
943 self.ui.warn(_("%s does not exist!\n") % f)
939 elif not islink and not os.path.isfile(p):
944 elif not islink and not os.path.isfile(p):
940 self.ui.warn(_("%s not added: only files and symlinks "
945 self.ui.warn(_("%s not added: only files and symlinks "
941 "supported currently\n") % f)
946 "supported currently\n") % f)
942 elif self.dirstate.state(f) in 'an':
947 elif self.dirstate.state(f) in 'an':
943 self.ui.warn(_("%s already tracked!\n") % f)
948 self.ui.warn(_("%s already tracked!\n") % f)
944 else:
949 else:
945 self.dirstate.update([f], "a")
950 self.dirstate.update([f], "a")
946
951
947 def forget(self, list, wlock=None):
952 def forget(self, list, wlock=None):
948 if not wlock:
953 if not wlock:
949 wlock = self.wlock()
954 wlock = self.wlock()
950 for f in list:
955 for f in list:
951 if self.dirstate.state(f) not in 'ai':
956 if self.dirstate.state(f) not in 'ai':
952 self.ui.warn(_("%s not added!\n") % f)
957 self.ui.warn(_("%s not added!\n") % f)
953 else:
958 else:
954 self.dirstate.forget([f])
959 self.dirstate.forget([f])
955
960
956 def remove(self, list, unlink=False, wlock=None):
961 def remove(self, list, unlink=False, wlock=None):
957 if unlink:
962 if unlink:
958 for f in list:
963 for f in list:
959 try:
964 try:
960 util.unlink(self.wjoin(f))
965 util.unlink(self.wjoin(f))
961 except OSError, inst:
966 except OSError, inst:
962 if inst.errno != errno.ENOENT:
967 if inst.errno != errno.ENOENT:
963 raise
968 raise
964 if not wlock:
969 if not wlock:
965 wlock = self.wlock()
970 wlock = self.wlock()
966 for f in list:
971 for f in list:
967 p = self.wjoin(f)
972 p = self.wjoin(f)
968 if os.path.exists(p):
973 if os.path.exists(p):
969 self.ui.warn(_("%s still exists!\n") % f)
974 self.ui.warn(_("%s still exists!\n") % f)
970 elif self.dirstate.state(f) == 'a':
975 elif self.dirstate.state(f) == 'a':
971 self.dirstate.forget([f])
976 self.dirstate.forget([f])
972 elif f not in self.dirstate:
977 elif f not in self.dirstate:
973 self.ui.warn(_("%s not tracked!\n") % f)
978 self.ui.warn(_("%s not tracked!\n") % f)
974 else:
979 else:
975 self.dirstate.update([f], "r")
980 self.dirstate.update([f], "r")
976
981
977 def undelete(self, list, wlock=None):
982 def undelete(self, list, wlock=None):
978 p = self.dirstate.parents()[0]
983 p = self.dirstate.parents()[0]
979 mn = self.changelog.read(p)[0]
984 mn = self.changelog.read(p)[0]
980 m = self.manifest.read(mn)
985 m = self.manifest.read(mn)
981 if not wlock:
986 if not wlock:
982 wlock = self.wlock()
987 wlock = self.wlock()
983 for f in list:
988 for f in list:
984 if self.dirstate.state(f) not in "r":
989 if self.dirstate.state(f) not in "r":
985 self.ui.warn("%s not removed!\n" % f)
990 self.ui.warn("%s not removed!\n" % f)
986 else:
991 else:
987 t = self.file(f).read(m[f])
992 t = self.file(f).read(m[f])
988 self.wwrite(f, t, m.flags(f))
993 self.wwrite(f, t, m.flags(f))
989 self.dirstate.update([f], "n")
994 self.dirstate.update([f], "n")
990
995
991 def copy(self, source, dest, wlock=None):
996 def copy(self, source, dest, wlock=None):
992 p = self.wjoin(dest)
997 p = self.wjoin(dest)
993 if not os.path.exists(p):
998 if not os.path.exists(p):
994 self.ui.warn(_("%s does not exist!\n") % dest)
999 self.ui.warn(_("%s does not exist!\n") % dest)
995 elif not os.path.isfile(p):
1000 elif not os.path.isfile(p):
996 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1001 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
997 else:
1002 else:
998 if not wlock:
1003 if not wlock:
999 wlock = self.wlock()
1004 wlock = self.wlock()
1000 if self.dirstate.state(dest) == '?':
1005 if self.dirstate.state(dest) == '?':
1001 self.dirstate.update([dest], "a")
1006 self.dirstate.update([dest], "a")
1002 self.dirstate.copy(source, dest)
1007 self.dirstate.copy(source, dest)
1003
1008
1004 def heads(self, start=None):
1009 def heads(self, start=None):
1005 heads = self.changelog.heads(start)
1010 heads = self.changelog.heads(start)
1006 # sort the output in rev descending order
1011 # sort the output in rev descending order
1007 heads = [(-self.changelog.rev(h), h) for h in heads]
1012 heads = [(-self.changelog.rev(h), h) for h in heads]
1008 heads.sort()
1013 heads.sort()
1009 return [n for (r, n) in heads]
1014 return [n for (r, n) in heads]
1010
1015
1011 def branches(self, nodes):
1016 def branches(self, nodes):
1012 if not nodes:
1017 if not nodes:
1013 nodes = [self.changelog.tip()]
1018 nodes = [self.changelog.tip()]
1014 b = []
1019 b = []
1015 for n in nodes:
1020 for n in nodes:
1016 t = n
1021 t = n
1017 while 1:
1022 while 1:
1018 p = self.changelog.parents(n)
1023 p = self.changelog.parents(n)
1019 if p[1] != nullid or p[0] == nullid:
1024 if p[1] != nullid or p[0] == nullid:
1020 b.append((t, n, p[0], p[1]))
1025 b.append((t, n, p[0], p[1]))
1021 break
1026 break
1022 n = p[0]
1027 n = p[0]
1023 return b
1028 return b
1024
1029
1025 def between(self, pairs):
1030 def between(self, pairs):
1026 r = []
1031 r = []
1027
1032
1028 for top, bottom in pairs:
1033 for top, bottom in pairs:
1029 n, l, i = top, [], 0
1034 n, l, i = top, [], 0
1030 f = 1
1035 f = 1
1031
1036
1032 while n != bottom:
1037 while n != bottom:
1033 p = self.changelog.parents(n)[0]
1038 p = self.changelog.parents(n)[0]
1034 if i == f:
1039 if i == f:
1035 l.append(n)
1040 l.append(n)
1036 f = f * 2
1041 f = f * 2
1037 n = p
1042 n = p
1038 i += 1
1043 i += 1
1039
1044
1040 r.append(l)
1045 r.append(l)
1041
1046
1042 return r
1047 return r
1043
1048
1044 def findincoming(self, remote, base=None, heads=None, force=False):
1049 def findincoming(self, remote, base=None, heads=None, force=False):
1045 """Return list of roots of the subsets of missing nodes from remote
1050 """Return list of roots of the subsets of missing nodes from remote
1046
1051
1047 If base dict is specified, assume that these nodes and their parents
1052 If base dict is specified, assume that these nodes and their parents
1048 exist on the remote side and that no child of a node of base exists
1053 exist on the remote side and that no child of a node of base exists
1049 in both remote and self.
1054 in both remote and self.
1050 Furthermore base will be updated to include the nodes that exists
1055 Furthermore base will be updated to include the nodes that exists
1051 in self and remote but no children exists in self and remote.
1056 in self and remote but no children exists in self and remote.
1052 If a list of heads is specified, return only nodes which are heads
1057 If a list of heads is specified, return only nodes which are heads
1053 or ancestors of these heads.
1058 or ancestors of these heads.
1054
1059
1055 All the ancestors of base are in self and in remote.
1060 All the ancestors of base are in self and in remote.
1056 All the descendants of the list returned are missing in self.
1061 All the descendants of the list returned are missing in self.
1057 (and so we know that the rest of the nodes are missing in remote, see
1062 (and so we know that the rest of the nodes are missing in remote, see
1058 outgoing)
1063 outgoing)
1059 """
1064 """
1060 m = self.changelog.nodemap
1065 m = self.changelog.nodemap
1061 search = []
1066 search = []
1062 fetch = {}
1067 fetch = {}
1063 seen = {}
1068 seen = {}
1064 seenbranch = {}
1069 seenbranch = {}
1065 if base == None:
1070 if base == None:
1066 base = {}
1071 base = {}
1067
1072
1068 if not heads:
1073 if not heads:
1069 heads = remote.heads()
1074 heads = remote.heads()
1070
1075
1071 if self.changelog.tip() == nullid:
1076 if self.changelog.tip() == nullid:
1072 base[nullid] = 1
1077 base[nullid] = 1
1073 if heads != [nullid]:
1078 if heads != [nullid]:
1074 return [nullid]
1079 return [nullid]
1075 return []
1080 return []
1076
1081
1077 # assume we're closer to the tip than the root
1082 # assume we're closer to the tip than the root
1078 # and start by examining the heads
1083 # and start by examining the heads
1079 self.ui.status(_("searching for changes\n"))
1084 self.ui.status(_("searching for changes\n"))
1080
1085
1081 unknown = []
1086 unknown = []
1082 for h in heads:
1087 for h in heads:
1083 if h not in m:
1088 if h not in m:
1084 unknown.append(h)
1089 unknown.append(h)
1085 else:
1090 else:
1086 base[h] = 1
1091 base[h] = 1
1087
1092
1088 if not unknown:
1093 if not unknown:
1089 return []
1094 return []
1090
1095
1091 req = dict.fromkeys(unknown)
1096 req = dict.fromkeys(unknown)
1092 reqcnt = 0
1097 reqcnt = 0
1093
1098
1094 # search through remote branches
1099 # search through remote branches
1095 # a 'branch' here is a linear segment of history, with four parts:
1100 # a 'branch' here is a linear segment of history, with four parts:
1096 # head, root, first parent, second parent
1101 # head, root, first parent, second parent
1097 # (a branch always has two parents (or none) by definition)
1102 # (a branch always has two parents (or none) by definition)
1098 unknown = remote.branches(unknown)
1103 unknown = remote.branches(unknown)
1099 while unknown:
1104 while unknown:
1100 r = []
1105 r = []
1101 while unknown:
1106 while unknown:
1102 n = unknown.pop(0)
1107 n = unknown.pop(0)
1103 if n[0] in seen:
1108 if n[0] in seen:
1104 continue
1109 continue
1105
1110
1106 self.ui.debug(_("examining %s:%s\n")
1111 self.ui.debug(_("examining %s:%s\n")
1107 % (short(n[0]), short(n[1])))
1112 % (short(n[0]), short(n[1])))
1108 if n[0] == nullid: # found the end of the branch
1113 if n[0] == nullid: # found the end of the branch
1109 pass
1114 pass
1110 elif n in seenbranch:
1115 elif n in seenbranch:
1111 self.ui.debug(_("branch already found\n"))
1116 self.ui.debug(_("branch already found\n"))
1112 continue
1117 continue
1113 elif n[1] and n[1] in m: # do we know the base?
1118 elif n[1] and n[1] in m: # do we know the base?
1114 self.ui.debug(_("found incomplete branch %s:%s\n")
1119 self.ui.debug(_("found incomplete branch %s:%s\n")
1115 % (short(n[0]), short(n[1])))
1120 % (short(n[0]), short(n[1])))
1116 search.append(n) # schedule branch range for scanning
1121 search.append(n) # schedule branch range for scanning
1117 seenbranch[n] = 1
1122 seenbranch[n] = 1
1118 else:
1123 else:
1119 if n[1] not in seen and n[1] not in fetch:
1124 if n[1] not in seen and n[1] not in fetch:
1120 if n[2] in m and n[3] in m:
1125 if n[2] in m and n[3] in m:
1121 self.ui.debug(_("found new changeset %s\n") %
1126 self.ui.debug(_("found new changeset %s\n") %
1122 short(n[1]))
1127 short(n[1]))
1123 fetch[n[1]] = 1 # earliest unknown
1128 fetch[n[1]] = 1 # earliest unknown
1124 for p in n[2:4]:
1129 for p in n[2:4]:
1125 if p in m:
1130 if p in m:
1126 base[p] = 1 # latest known
1131 base[p] = 1 # latest known
1127
1132
1128 for p in n[2:4]:
1133 for p in n[2:4]:
1129 if p not in req and p not in m:
1134 if p not in req and p not in m:
1130 r.append(p)
1135 r.append(p)
1131 req[p] = 1
1136 req[p] = 1
1132 seen[n[0]] = 1
1137 seen[n[0]] = 1
1133
1138
1134 if r:
1139 if r:
1135 reqcnt += 1
1140 reqcnt += 1
1136 self.ui.debug(_("request %d: %s\n") %
1141 self.ui.debug(_("request %d: %s\n") %
1137 (reqcnt, " ".join(map(short, r))))
1142 (reqcnt, " ".join(map(short, r))))
1138 for p in xrange(0, len(r), 10):
1143 for p in xrange(0, len(r), 10):
1139 for b in remote.branches(r[p:p+10]):
1144 for b in remote.branches(r[p:p+10]):
1140 self.ui.debug(_("received %s:%s\n") %
1145 self.ui.debug(_("received %s:%s\n") %
1141 (short(b[0]), short(b[1])))
1146 (short(b[0]), short(b[1])))
1142 unknown.append(b)
1147 unknown.append(b)
1143
1148
1144 # do binary search on the branches we found
1149 # do binary search on the branches we found
1145 while search:
1150 while search:
1146 n = search.pop(0)
1151 n = search.pop(0)
1147 reqcnt += 1
1152 reqcnt += 1
1148 l = remote.between([(n[0], n[1])])[0]
1153 l = remote.between([(n[0], n[1])])[0]
1149 l.append(n[1])
1154 l.append(n[1])
1150 p = n[0]
1155 p = n[0]
1151 f = 1
1156 f = 1
1152 for i in l:
1157 for i in l:
1153 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1158 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1154 if i in m:
1159 if i in m:
1155 if f <= 2:
1160 if f <= 2:
1156 self.ui.debug(_("found new branch changeset %s\n") %
1161 self.ui.debug(_("found new branch changeset %s\n") %
1157 short(p))
1162 short(p))
1158 fetch[p] = 1
1163 fetch[p] = 1
1159 base[i] = 1
1164 base[i] = 1
1160 else:
1165 else:
1161 self.ui.debug(_("narrowed branch search to %s:%s\n")
1166 self.ui.debug(_("narrowed branch search to %s:%s\n")
1162 % (short(p), short(i)))
1167 % (short(p), short(i)))
1163 search.append((p, i))
1168 search.append((p, i))
1164 break
1169 break
1165 p, f = i, f * 2
1170 p, f = i, f * 2
1166
1171
1167 # sanity check our fetch list
1172 # sanity check our fetch list
1168 for f in fetch.keys():
1173 for f in fetch.keys():
1169 if f in m:
1174 if f in m:
1170 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1175 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1171
1176
1172 if base.keys() == [nullid]:
1177 if base.keys() == [nullid]:
1173 if force:
1178 if force:
1174 self.ui.warn(_("warning: repository is unrelated\n"))
1179 self.ui.warn(_("warning: repository is unrelated\n"))
1175 else:
1180 else:
1176 raise util.Abort(_("repository is unrelated"))
1181 raise util.Abort(_("repository is unrelated"))
1177
1182
1178 self.ui.debug(_("found new changesets starting at ") +
1183 self.ui.debug(_("found new changesets starting at ") +
1179 " ".join([short(f) for f in fetch]) + "\n")
1184 " ".join([short(f) for f in fetch]) + "\n")
1180
1185
1181 self.ui.debug(_("%d total queries\n") % reqcnt)
1186 self.ui.debug(_("%d total queries\n") % reqcnt)
1182
1187
1183 return fetch.keys()
1188 return fetch.keys()
1184
1189
1185 def findoutgoing(self, remote, base=None, heads=None, force=False):
1190 def findoutgoing(self, remote, base=None, heads=None, force=False):
1186 """Return list of nodes that are roots of subsets not in remote
1191 """Return list of nodes that are roots of subsets not in remote
1187
1192
1188 If base dict is specified, assume that these nodes and their parents
1193 If base dict is specified, assume that these nodes and their parents
1189 exist on the remote side.
1194 exist on the remote side.
1190 If a list of heads is specified, return only nodes which are heads
1195 If a list of heads is specified, return only nodes which are heads
1191 or ancestors of these heads, and return a second element which
1196 or ancestors of these heads, and return a second element which
1192 contains all remote heads which get new children.
1197 contains all remote heads which get new children.
1193 """
1198 """
1194 if base == None:
1199 if base == None:
1195 base = {}
1200 base = {}
1196 self.findincoming(remote, base, heads, force=force)
1201 self.findincoming(remote, base, heads, force=force)
1197
1202
1198 self.ui.debug(_("common changesets up to ")
1203 self.ui.debug(_("common changesets up to ")
1199 + " ".join(map(short, base.keys())) + "\n")
1204 + " ".join(map(short, base.keys())) + "\n")
1200
1205
1201 remain = dict.fromkeys(self.changelog.nodemap)
1206 remain = dict.fromkeys(self.changelog.nodemap)
1202
1207
1203 # prune everything remote has from the tree
1208 # prune everything remote has from the tree
1204 del remain[nullid]
1209 del remain[nullid]
1205 remove = base.keys()
1210 remove = base.keys()
1206 while remove:
1211 while remove:
1207 n = remove.pop(0)
1212 n = remove.pop(0)
1208 if n in remain:
1213 if n in remain:
1209 del remain[n]
1214 del remain[n]
1210 for p in self.changelog.parents(n):
1215 for p in self.changelog.parents(n):
1211 remove.append(p)
1216 remove.append(p)
1212
1217
1213 # find every node whose parents have been pruned
1218 # find every node whose parents have been pruned
1214 subset = []
1219 subset = []
1215 # find every remote head that will get new children
1220 # find every remote head that will get new children
1216 updated_heads = {}
1221 updated_heads = {}
1217 for n in remain:
1222 for n in remain:
1218 p1, p2 = self.changelog.parents(n)
1223 p1, p2 = self.changelog.parents(n)
1219 if p1 not in remain and p2 not in remain:
1224 if p1 not in remain and p2 not in remain:
1220 subset.append(n)
1225 subset.append(n)
1221 if heads:
1226 if heads:
1222 if p1 in heads:
1227 if p1 in heads:
1223 updated_heads[p1] = True
1228 updated_heads[p1] = True
1224 if p2 in heads:
1229 if p2 in heads:
1225 updated_heads[p2] = True
1230 updated_heads[p2] = True
1226
1231
1227 # this is the set of all roots we have to push
1232 # this is the set of all roots we have to push
1228 if heads:
1233 if heads:
1229 return subset, updated_heads.keys()
1234 return subset, updated_heads.keys()
1230 else:
1235 else:
1231 return subset
1236 return subset
1232
1237
1233 def pull(self, remote, heads=None, force=False, lock=None):
1238 def pull(self, remote, heads=None, force=False, lock=None):
1234 mylock = False
1239 mylock = False
1235 if not lock:
1240 if not lock:
1236 lock = self.lock()
1241 lock = self.lock()
1237 mylock = True
1242 mylock = True
1238
1243
1239 try:
1244 try:
1240 fetch = self.findincoming(remote, force=force)
1245 fetch = self.findincoming(remote, force=force)
1241 if fetch == [nullid]:
1246 if fetch == [nullid]:
1242 self.ui.status(_("requesting all changes\n"))
1247 self.ui.status(_("requesting all changes\n"))
1243
1248
1244 if not fetch:
1249 if not fetch:
1245 self.ui.status(_("no changes found\n"))
1250 self.ui.status(_("no changes found\n"))
1246 return 0
1251 return 0
1247
1252
1248 if heads is None:
1253 if heads is None:
1249 cg = remote.changegroup(fetch, 'pull')
1254 cg = remote.changegroup(fetch, 'pull')
1250 else:
1255 else:
1251 if 'changegroupsubset' not in remote.capabilities:
1256 if 'changegroupsubset' not in remote.capabilities:
1252 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1257 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1253 cg = remote.changegroupsubset(fetch, heads, 'pull')
1258 cg = remote.changegroupsubset(fetch, heads, 'pull')
1254 return self.addchangegroup(cg, 'pull', remote.url())
1259 return self.addchangegroup(cg, 'pull', remote.url())
1255 finally:
1260 finally:
1256 if mylock:
1261 if mylock:
1257 lock.release()
1262 lock.release()
1258
1263
1259 def push(self, remote, force=False, revs=None):
1264 def push(self, remote, force=False, revs=None):
1260 # there are two ways to push to remote repo:
1265 # there are two ways to push to remote repo:
1261 #
1266 #
1262 # addchangegroup assumes local user can lock remote
1267 # addchangegroup assumes local user can lock remote
1263 # repo (local filesystem, old ssh servers).
1268 # repo (local filesystem, old ssh servers).
1264 #
1269 #
1265 # unbundle assumes local user cannot lock remote repo (new ssh
1270 # unbundle assumes local user cannot lock remote repo (new ssh
1266 # servers, http servers).
1271 # servers, http servers).
1267
1272
1268 if remote.capable('unbundle'):
1273 if remote.capable('unbundle'):
1269 return self.push_unbundle(remote, force, revs)
1274 return self.push_unbundle(remote, force, revs)
1270 return self.push_addchangegroup(remote, force, revs)
1275 return self.push_addchangegroup(remote, force, revs)
1271
1276
    def prepush(self, remote, force, revs):
        """Analyse what must be pushed and build the changegroup.

        Returns (changegroup, remote_heads) on success, or (None, status)
        when there is nothing to push or the push would create new remote
        heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # 'inc' is truthy when remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing to warn about
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                # count how many heads the remote would end up with
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally, it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1327
1332
1328 def push_addchangegroup(self, remote, force, revs):
1333 def push_addchangegroup(self, remote, force, revs):
1329 lock = remote.lock()
1334 lock = remote.lock()
1330
1335
1331 ret = self.prepush(remote, force, revs)
1336 ret = self.prepush(remote, force, revs)
1332 if ret[0] is not None:
1337 if ret[0] is not None:
1333 cg, remote_heads = ret
1338 cg, remote_heads = ret
1334 return remote.addchangegroup(cg, 'push', self.url())
1339 return remote.addchangegroup(cg, 'push', self.url())
1335 return ret[1]
1340 return ret[1]
1336
1341
1337 def push_unbundle(self, remote, force, revs):
1342 def push_unbundle(self, remote, force, revs):
1338 # local repo finds heads on server, finds out what revs it
1343 # local repo finds heads on server, finds out what revs it
1339 # must push. once revs transferred, if server finds it has
1344 # must push. once revs transferred, if server finds it has
1340 # different heads (someone else won commit/push race), server
1345 # different heads (someone else won commit/push race), server
1341 # aborts.
1346 # aborts.
1342
1347
1343 ret = self.prepush(remote, force, revs)
1348 ret = self.prepush(remote, force, revs)
1344 if ret[0] is not None:
1349 if ret[0] is not None:
1345 cg, remote_heads = ret
1350 cg, remote_heads = ret
1346 if force: remote_heads = ['force']
1351 if force: remote_heads = ['force']
1347 return remote.unbundle(cg, remote_heads, 'push')
1352 return remote.unbundle(cg, remote_heads, 'push')
1348 return ret[1]
1353 return ret[1]
1349
1354
1350 def changegroupinfo(self, nodes):
1355 def changegroupinfo(self, nodes):
1351 self.ui.note(_("%d changesets found\n") % len(nodes))
1356 self.ui.note(_("%d changesets found\n") % len(nodes))
1352 if self.ui.debugflag:
1357 if self.ui.debugflag:
1353 self.ui.debug(_("List of changesets:\n"))
1358 self.ui.debug(_("List of changesets:\n"))
1354 for node in nodes:
1359 for node in nodes:
1355 self.ui.debug("%s\n" % hex(node))
1360 self.ui.debug("%s\n" % hex(node))
1356
1361
1357 def changegroupsubset(self, bases, heads, source):
1362 def changegroupsubset(self, bases, heads, source):
1358 """This function generates a changegroup consisting of all the nodes
1363 """This function generates a changegroup consisting of all the nodes
1359 that are descendents of any of the bases, and ancestors of any of
1364 that are descendents of any of the bases, and ancestors of any of
1360 the heads.
1365 the heads.
1361
1366
1362 It is fairly complex as determining which filenodes and which
1367 It is fairly complex as determining which filenodes and which
1363 manifest nodes need to be included for the changeset to be complete
1368 manifest nodes need to be included for the changeset to be complete
1364 is non-trivial.
1369 is non-trivial.
1365
1370
1366 Another wrinkle is doing the reverse, figuring out which changeset in
1371 Another wrinkle is doing the reverse, figuring out which changeset in
1367 the changegroup a particular filenode or manifestnode belongs to."""
1372 the changegroup a particular filenode or manifestnode belongs to."""
1368
1373
1369 self.hook('preoutgoing', throw=True, source=source)
1374 self.hook('preoutgoing', throw=True, source=source)
1370
1375
1371 # Set up some initial variables
1376 # Set up some initial variables
1372 # Make it easy to refer to self.changelog
1377 # Make it easy to refer to self.changelog
1373 cl = self.changelog
1378 cl = self.changelog
1374 # msng is short for missing - compute the list of changesets in this
1379 # msng is short for missing - compute the list of changesets in this
1375 # changegroup.
1380 # changegroup.
1376 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1381 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1377 self.changegroupinfo(msng_cl_lst)
1382 self.changegroupinfo(msng_cl_lst)
1378 # Some bases may turn out to be superfluous, and some heads may be
1383 # Some bases may turn out to be superfluous, and some heads may be
1379 # too. nodesbetween will return the minimal set of bases and heads
1384 # too. nodesbetween will return the minimal set of bases and heads
1380 # necessary to re-create the changegroup.
1385 # necessary to re-create the changegroup.
1381
1386
1382 # Known heads are the list of heads that it is assumed the recipient
1387 # Known heads are the list of heads that it is assumed the recipient
1383 # of this changegroup will know about.
1388 # of this changegroup will know about.
1384 knownheads = {}
1389 knownheads = {}
1385 # We assume that all parents of bases are known heads.
1390 # We assume that all parents of bases are known heads.
1386 for n in bases:
1391 for n in bases:
1387 for p in cl.parents(n):
1392 for p in cl.parents(n):
1388 if p != nullid:
1393 if p != nullid:
1389 knownheads[p] = 1
1394 knownheads[p] = 1
1390 knownheads = knownheads.keys()
1395 knownheads = knownheads.keys()
1391 if knownheads:
1396 if knownheads:
1392 # Now that we know what heads are known, we can compute which
1397 # Now that we know what heads are known, we can compute which
1393 # changesets are known. The recipient must know about all
1398 # changesets are known. The recipient must know about all
1394 # changesets required to reach the known heads from the null
1399 # changesets required to reach the known heads from the null
1395 # changeset.
1400 # changeset.
1396 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1401 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1397 junk = None
1402 junk = None
1398 # Transform the list into an ersatz set.
1403 # Transform the list into an ersatz set.
1399 has_cl_set = dict.fromkeys(has_cl_set)
1404 has_cl_set = dict.fromkeys(has_cl_set)
1400 else:
1405 else:
1401 # If there were no known heads, the recipient cannot be assumed to
1406 # If there were no known heads, the recipient cannot be assumed to
1402 # know about any changesets.
1407 # know about any changesets.
1403 has_cl_set = {}
1408 has_cl_set = {}
1404
1409
1405 # Make it easy to refer to self.manifest
1410 # Make it easy to refer to self.manifest
1406 mnfst = self.manifest
1411 mnfst = self.manifest
1407 # We don't know which manifests are missing yet
1412 # We don't know which manifests are missing yet
1408 msng_mnfst_set = {}
1413 msng_mnfst_set = {}
1409 # Nor do we know which filenodes are missing.
1414 # Nor do we know which filenodes are missing.
1410 msng_filenode_set = {}
1415 msng_filenode_set = {}
1411
1416
1412 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1417 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1413 junk = None
1418 junk = None
1414
1419
1415 # A changeset always belongs to itself, so the changenode lookup
1420 # A changeset always belongs to itself, so the changenode lookup
1416 # function for a changenode is identity.
1421 # function for a changenode is identity.
1417 def identity(x):
1422 def identity(x):
1418 return x
1423 return x
1419
1424
1420 # A function generating function. Sets up an environment for the
1425 # A function generating function. Sets up an environment for the
1421 # inner function.
1426 # inner function.
1422 def cmp_by_rev_func(revlog):
1427 def cmp_by_rev_func(revlog):
1423 # Compare two nodes by their revision number in the environment's
1428 # Compare two nodes by their revision number in the environment's
1424 # revision history. Since the revision number both represents the
1429 # revision history. Since the revision number both represents the
1425 # most efficient order to read the nodes in, and represents a
1430 # most efficient order to read the nodes in, and represents a
1426 # topological sorting of the nodes, this function is often useful.
1431 # topological sorting of the nodes, this function is often useful.
1427 def cmp_by_rev(a, b):
1432 def cmp_by_rev(a, b):
1428 return cmp(revlog.rev(a), revlog.rev(b))
1433 return cmp(revlog.rev(a), revlog.rev(b))
1429 return cmp_by_rev
1434 return cmp_by_rev
1430
1435
1431 # If we determine that a particular file or manifest node must be a
1436 # If we determine that a particular file or manifest node must be a
1432 # node that the recipient of the changegroup will already have, we can
1437 # node that the recipient of the changegroup will already have, we can
1433 # also assume the recipient will have all the parents. This function
1438 # also assume the recipient will have all the parents. This function
1434 # prunes them from the set of missing nodes.
1439 # prunes them from the set of missing nodes.
1435 def prune_parents(revlog, hasset, msngset):
1440 def prune_parents(revlog, hasset, msngset):
1436 haslst = hasset.keys()
1441 haslst = hasset.keys()
1437 haslst.sort(cmp_by_rev_func(revlog))
1442 haslst.sort(cmp_by_rev_func(revlog))
1438 for node in haslst:
1443 for node in haslst:
1439 parentlst = [p for p in revlog.parents(node) if p != nullid]
1444 parentlst = [p for p in revlog.parents(node) if p != nullid]
1440 while parentlst:
1445 while parentlst:
1441 n = parentlst.pop()
1446 n = parentlst.pop()
1442 if n not in hasset:
1447 if n not in hasset:
1443 hasset[n] = 1
1448 hasset[n] = 1
1444 p = [p for p in revlog.parents(n) if p != nullid]
1449 p = [p for p in revlog.parents(n) if p != nullid]
1445 parentlst.extend(p)
1450 parentlst.extend(p)
1446 for n in hasset:
1451 for n in hasset:
1447 msngset.pop(n, None)
1452 msngset.pop(n, None)
1448
1453
1449 # This is a function generating function used to set up an environment
1454 # This is a function generating function used to set up an environment
1450 # for the inner function to execute in.
1455 # for the inner function to execute in.
1451 def manifest_and_file_collector(changedfileset):
1456 def manifest_and_file_collector(changedfileset):
1452 # This is an information gathering function that gathers
1457 # This is an information gathering function that gathers
1453 # information from each changeset node that goes out as part of
1458 # information from each changeset node that goes out as part of
1454 # the changegroup. The information gathered is a list of which
1459 # the changegroup. The information gathered is a list of which
1455 # manifest nodes are potentially required (the recipient may
1460 # manifest nodes are potentially required (the recipient may
1456 # already have them) and total list of all files which were
1461 # already have them) and total list of all files which were
1457 # changed in any changeset in the changegroup.
1462 # changed in any changeset in the changegroup.
1458 #
1463 #
1459 # We also remember the first changenode we saw any manifest
1464 # We also remember the first changenode we saw any manifest
1460 # referenced by so we can later determine which changenode 'owns'
1465 # referenced by so we can later determine which changenode 'owns'
1461 # the manifest.
1466 # the manifest.
1462 def collect_manifests_and_files(clnode):
1467 def collect_manifests_and_files(clnode):
1463 c = cl.read(clnode)
1468 c = cl.read(clnode)
1464 for f in c[3]:
1469 for f in c[3]:
1465 # This is to make sure we only have one instance of each
1470 # This is to make sure we only have one instance of each
1466 # filename string for each filename.
1471 # filename string for each filename.
1467 changedfileset.setdefault(f, f)
1472 changedfileset.setdefault(f, f)
1468 msng_mnfst_set.setdefault(c[0], clnode)
1473 msng_mnfst_set.setdefault(c[0], clnode)
1469 return collect_manifests_and_files
1474 return collect_manifests_and_files
1470
1475
1471 # Figure out which manifest nodes (of the ones we think might be part
1476 # Figure out which manifest nodes (of the ones we think might be part
1472 # of the changegroup) the recipient must know about and remove them
1477 # of the changegroup) the recipient must know about and remove them
1473 # from the changegroup.
1478 # from the changegroup.
1474 def prune_manifests():
1479 def prune_manifests():
1475 has_mnfst_set = {}
1480 has_mnfst_set = {}
1476 for n in msng_mnfst_set:
1481 for n in msng_mnfst_set:
1477 # If a 'missing' manifest thinks it belongs to a changenode
1482 # If a 'missing' manifest thinks it belongs to a changenode
1478 # the recipient is assumed to have, obviously the recipient
1483 # the recipient is assumed to have, obviously the recipient
1479 # must have that manifest.
1484 # must have that manifest.
1480 linknode = cl.node(mnfst.linkrev(n))
1485 linknode = cl.node(mnfst.linkrev(n))
1481 if linknode in has_cl_set:
1486 if linknode in has_cl_set:
1482 has_mnfst_set[n] = 1
1487 has_mnfst_set[n] = 1
1483 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1488 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1484
1489
1485 # Use the information collected in collect_manifests_and_files to say
1490 # Use the information collected in collect_manifests_and_files to say
1486 # which changenode any manifestnode belongs to.
1491 # which changenode any manifestnode belongs to.
1487 def lookup_manifest_link(mnfstnode):
1492 def lookup_manifest_link(mnfstnode):
1488 return msng_mnfst_set[mnfstnode]
1493 return msng_mnfst_set[mnfstnode]
1489
1494
1490 # A function generating function that sets up the initial environment
1495 # A function generating function that sets up the initial environment
1491 # the inner function.
1496 # the inner function.
1492 def filenode_collector(changedfiles):
1497 def filenode_collector(changedfiles):
1493 next_rev = [0]
1498 next_rev = [0]
1494 # This gathers information from each manifestnode included in the
1499 # This gathers information from each manifestnode included in the
1495 # changegroup about which filenodes the manifest node references
1500 # changegroup about which filenodes the manifest node references
1496 # so we can include those in the changegroup too.
1501 # so we can include those in the changegroup too.
1497 #
1502 #
1498 # It also remembers which changenode each filenode belongs to. It
1503 # It also remembers which changenode each filenode belongs to. It
1499 # does this by assuming the a filenode belongs to the changenode
1504 # does this by assuming the a filenode belongs to the changenode
1500 # the first manifest that references it belongs to.
1505 # the first manifest that references it belongs to.
1501 def collect_msng_filenodes(mnfstnode):
1506 def collect_msng_filenodes(mnfstnode):
1502 r = mnfst.rev(mnfstnode)
1507 r = mnfst.rev(mnfstnode)
1503 if r == next_rev[0]:
1508 if r == next_rev[0]:
1504 # If the last rev we looked at was the one just previous,
1509 # If the last rev we looked at was the one just previous,
1505 # we only need to see a diff.
1510 # we only need to see a diff.
1506 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1511 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1507 # For each line in the delta
1512 # For each line in the delta
1508 for dline in delta.splitlines():
1513 for dline in delta.splitlines():
1509 # get the filename and filenode for that line
1514 # get the filename and filenode for that line
1510 f, fnode = dline.split('\0')
1515 f, fnode = dline.split('\0')
1511 fnode = bin(fnode[:40])
1516 fnode = bin(fnode[:40])
1512 f = changedfiles.get(f, None)
1517 f = changedfiles.get(f, None)
1513 # And if the file is in the list of files we care
1518 # And if the file is in the list of files we care
1514 # about.
1519 # about.
1515 if f is not None:
1520 if f is not None:
1516 # Get the changenode this manifest belongs to
1521 # Get the changenode this manifest belongs to
1517 clnode = msng_mnfst_set[mnfstnode]
1522 clnode = msng_mnfst_set[mnfstnode]
1518 # Create the set of filenodes for the file if
1523 # Create the set of filenodes for the file if
1519 # there isn't one already.
1524 # there isn't one already.
1520 ndset = msng_filenode_set.setdefault(f, {})
1525 ndset = msng_filenode_set.setdefault(f, {})
1521 # And set the filenode's changelog node to the
1526 # And set the filenode's changelog node to the
1522 # manifest's if it hasn't been set already.
1527 # manifest's if it hasn't been set already.
1523 ndset.setdefault(fnode, clnode)
1528 ndset.setdefault(fnode, clnode)
1524 else:
1529 else:
1525 # Otherwise we need a full manifest.
1530 # Otherwise we need a full manifest.
1526 m = mnfst.read(mnfstnode)
1531 m = mnfst.read(mnfstnode)
1527 # For every file in we care about.
1532 # For every file in we care about.
1528 for f in changedfiles:
1533 for f in changedfiles:
1529 fnode = m.get(f, None)
1534 fnode = m.get(f, None)
1530 # If it's in the manifest
1535 # If it's in the manifest
1531 if fnode is not None:
1536 if fnode is not None:
1532 # See comments above.
1537 # See comments above.
1533 clnode = msng_mnfst_set[mnfstnode]
1538 clnode = msng_mnfst_set[mnfstnode]
1534 ndset = msng_filenode_set.setdefault(f, {})
1539 ndset = msng_filenode_set.setdefault(f, {})
1535 ndset.setdefault(fnode, clnode)
1540 ndset.setdefault(fnode, clnode)
1536 # Remember the revision we hope to see next.
1541 # Remember the revision we hope to see next.
1537 next_rev[0] = r + 1
1542 next_rev[0] = r + 1
1538 return collect_msng_filenodes
1543 return collect_msng_filenodes
1539
1544
        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            """Drop from msng_filenode_set[f] every filenode the recipient
            is known to already have.

            f: file name whose candidate filenodes are pruned.
            filerevlog: the revlog for f, used to map filenodes back to
            their linked changelog revisions.
            Mutates msng_filenode_set[f] in place (via prune_parents).
            """
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            # Remove the known-present nodes (and their ancestors) from the
            # missing set.
            prune_parents(filerevlog, hasset, msngset)
1553
1558
1554 # A function generator function that sets up the a context for the
1559 # A function generator function that sets up the a context for the
1555 # inner function.
1560 # inner function.
1556 def lookup_filenode_link_func(fname):
1561 def lookup_filenode_link_func(fname):
1557 msngset = msng_filenode_set[fname]
1562 msngset = msng_filenode_set[fname]
1558 # Lookup the changenode the filenode belongs to.
1563 # Lookup the changenode the filenode belongs to.
1559 def lookup_filenode_link(fnode):
1564 def lookup_filenode_link(fnode):
1560 return msngset[fnode]
1565 return msngset[fnode]
1561 return lookup_filenode_link
1566 return lookup_filenode_link
1562
1567
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            """Generator yielding the raw changegroup byte chunks:
            first the changelog group, then the manifest group, then one
            group per changed file, terminated by a close chunk."""
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
1623
1628
1624 if msng_cl_lst:
1629 if msng_cl_lst:
1625 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1630 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1626
1631
1627 return util.chunkbuffer(gengroup())
1632 return util.chunkbuffer(gengroup())
1628
1633
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient already has; everything between them
        and our heads is sent.
        source: operation tag passed to the 'preoutgoing'/'outgoing' hooks.
        Returns a util.chunkbuffer wrapping the chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All descendants of basenodes are outgoing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Set of outgoing changelog revision numbers, for fast membership.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # Changelog nodes link to themselves.
            return x

        def gennodelst(revlog):
            # Yield nodes of revlog whose linked changeset is outgoing.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Callback factory: records every file touched by each
            # outgoing changeset into changedfileset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files changed by this changeset.
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Callback factory: maps a node in revlog back to the
            # changelog node it belongs to.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so an empty iterator can be tested cheaply.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1695
1700
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks.
        srctype/url: origin tags passed through to the hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # Progress/debug callback for changelog.addgroup; returns the
            # revision number the incoming changeset will receive.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its revision number (link revision
            # for manifest/file groups).
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog revision counts before/after the add.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # An empty chunk marks the end of the file groups.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the appendfile changelog to its real files.
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added changeset.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1802
1807
1803
1808
1804 def stream_in(self, remote):
1809 def stream_in(self, remote):
1805 fp = remote.stream_out()
1810 fp = remote.stream_out()
1806 l = fp.readline()
1811 l = fp.readline()
1807 try:
1812 try:
1808 resp = int(l)
1813 resp = int(l)
1809 except ValueError:
1814 except ValueError:
1810 raise util.UnexpectedOutput(
1815 raise util.UnexpectedOutput(
1811 _('Unexpected response from remote server:'), l)
1816 _('Unexpected response from remote server:'), l)
1812 if resp == 1:
1817 if resp == 1:
1813 raise util.Abort(_('operation forbidden by server'))
1818 raise util.Abort(_('operation forbidden by server'))
1814 elif resp == 2:
1819 elif resp == 2:
1815 raise util.Abort(_('locking the remote repository failed'))
1820 raise util.Abort(_('locking the remote repository failed'))
1816 elif resp != 0:
1821 elif resp != 0:
1817 raise util.Abort(_('the server sent an unknown error code'))
1822 raise util.Abort(_('the server sent an unknown error code'))
1818 self.ui.status(_('streaming all changes\n'))
1823 self.ui.status(_('streaming all changes\n'))
1819 l = fp.readline()
1824 l = fp.readline()
1820 try:
1825 try:
1821 total_files, total_bytes = map(int, l.split(' ', 1))
1826 total_files, total_bytes = map(int, l.split(' ', 1))
1822 except ValueError, TypeError:
1827 except ValueError, TypeError:
1823 raise util.UnexpectedOutput(
1828 raise util.UnexpectedOutput(
1824 _('Unexpected response from remote server:'), l)
1829 _('Unexpected response from remote server:'), l)
1825 self.ui.status(_('%d files to transfer, %s of data\n') %
1830 self.ui.status(_('%d files to transfer, %s of data\n') %
1826 (total_files, util.bytecount(total_bytes)))
1831 (total_files, util.bytecount(total_bytes)))
1827 start = time.time()
1832 start = time.time()
1828 for i in xrange(total_files):
1833 for i in xrange(total_files):
1829 # XXX doesn't support '\n' or '\r' in filenames
1834 # XXX doesn't support '\n' or '\r' in filenames
1830 l = fp.readline()
1835 l = fp.readline()
1831 try:
1836 try:
1832 name, size = l.split('\0', 1)
1837 name, size = l.split('\0', 1)
1833 size = int(size)
1838 size = int(size)
1834 except ValueError, TypeError:
1839 except ValueError, TypeError:
1835 raise util.UnexpectedOutput(
1840 raise util.UnexpectedOutput(
1836 _('Unexpected response from remote server:'), l)
1841 _('Unexpected response from remote server:'), l)
1837 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1842 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1838 ofp = self.sopener(name, 'w')
1843 ofp = self.sopener(name, 'w')
1839 for chunk in util.filechunkiter(fp, limit=size):
1844 for chunk in util.filechunkiter(fp, limit=size):
1840 ofp.write(chunk)
1845 ofp.write(chunk)
1841 ofp.close()
1846 ofp.close()
1842 elapsed = time.time() - start
1847 elapsed = time.time() - start
1843 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1848 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1844 (util.bytecount(total_bytes), elapsed,
1849 (util.bytecount(total_bytes), elapsed,
1845 util.bytecount(total_bytes / elapsed)))
1850 util.bytecount(total_bytes / elapsed)))
1846 self.reload()
1851 self.reload()
1847 return len(self.heads()) + 1
1852 return len(self.heads()) + 1
1848
1853
1849 def clone(self, remote, heads=[], stream=False):
1854 def clone(self, remote, heads=[], stream=False):
1850 '''clone remote repository.
1855 '''clone remote repository.
1851
1856
1852 keyword arguments:
1857 keyword arguments:
1853 heads: list of revs to clone (forces use of pull)
1858 heads: list of revs to clone (forces use of pull)
1854 stream: use streaming clone if possible'''
1859 stream: use streaming clone if possible'''
1855
1860
1856 # now, all clients that can request uncompressed clones can
1861 # now, all clients that can request uncompressed clones can
1857 # read repo formats supported by all servers that can serve
1862 # read repo formats supported by all servers that can serve
1858 # them.
1863 # them.
1859
1864
1860 # if revlog format changes, client will have to check version
1865 # if revlog format changes, client will have to check version
1861 # and format flags on "stream" capability, and use
1866 # and format flags on "stream" capability, and use
1862 # uncompressed only if compatible.
1867 # uncompressed only if compatible.
1863
1868
1864 if stream and not heads and remote.capable('stream'):
1869 if stream and not heads and remote.capable('stream'):
1865 return self.stream_in(remote)
1870 return self.stream_in(remote)
1866 return self.pull(remote, heads)
1871 return self.pull(remote, heads)
1867
1872
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in files.

    The pairs are snapshotted as tuples up front so later mutation of the
    caller's list does not affect the deferred renames.
    """
    pending = [tuple(t) for t in files]
    def run_renames():
        for src_path, dest_path in pending:
            util.rename(src_path, dest_path)
    return run_renames
1875
1880
def instance(ui, path, create):
    """Open (or, if create is true, create) the local repository at path,
    stripping any leading 'file' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1878
1883
def islocal(path):
    """A localrepository path is local by definition."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now