Create the parent directory when checking out symlinks.
Giorgos Keramidas
r4137:26596a6b default
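
The change teaches localrepository.wwrite to create missing parent directories before checking out a symlink, so updating to a revision that adds a link such as sub/dir/link no longer fails when sub/dir has not been created yet. A minimal standalone sketch of the fixed behavior (checkout_symlink, target and linkpath are hypothetical names for illustration, not part of the patch):

    import os

    def checkout_symlink(target, linkpath):
        # drop any stale file or link at the destination; a missing one is fine
        try:
            os.unlink(linkpath)
        except OSError:
            pass
        # the fix: make sure the parent directory exists before linking
        d = os.path.dirname(linkpath)
        if d and not os.path.exists(d):
            os.makedirs(d)
        os.symlink(target, linkpath)

In wwrite itself the destination comes from self.wjoin(filename), so it is always an absolute path and the extra "d and" guard is not needed there; the sketch keeps it so the helper also accepts bare filenames.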
@@ -1,1922 +1,1926 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, appendfile, changegroup
import changelog, dirstate, filelog, manifest, context
import re, lock, transaction, tempfile, stat, mdiff, errno, ui
import os, revlog, time, util

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')
    supported = ('revlogv1', 'store')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = l.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: cannot parse entry\n") % context)
                    return
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                 (context, node))
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                 (context, key))
                    return
                self.tagscache[key] = bin_n

            # read the tags file from each head, ending with the tip,
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            f = None
            for rev, node, fnode in self._hgtagsnodes():
                f = (f and f.filectx(fnode) or
                     self.filectx('.hgtags', fileid=fnode))
                count = 0
                for l in f.data().splitlines():
                    count += 1
                    parsetag(l, _("%s, line %d") % (str(f), count))

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    # localtags are stored in the local character set
                    # while the internal tag table is stored in UTF-8
                    l = util.fromlocal(l)
                    count += 1
                    parsetag(l, _("localtags, line %d") % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        if "l" in flags:
+           f = self.wjoin(filename)
            try:
-               os.unlink(self.wjoin(filename))
+               os.unlink(f)
            except OSError:
                pass
-           os.symlink(data, self.wjoin(filename))
+           d = os.path.dirname(f)
+           if not os.path.exists(d):
+               os.makedirs(d)
+           os.symlink(data, f)
        else:
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
541
545
542 def wwritedata(self, filename, data):
546 def wwritedata(self, filename, data):
543 return self._filter("decode", filename, data)
547 return self._filter("decode", filename, data)
544
548
545 def transaction(self):
549 def transaction(self):
546 tr = self.transhandle
550 tr = self.transhandle
547 if tr != None and tr.running():
551 if tr != None and tr.running():
548 return tr.nest()
552 return tr.nest()
549
553
550 # save dirstate for rollback
554 # save dirstate for rollback
551 try:
555 try:
552 ds = self.opener("dirstate").read()
556 ds = self.opener("dirstate").read()
553 except IOError:
557 except IOError:
554 ds = ""
558 ds = ""
555 self.opener("journal.dirstate", "w").write(ds)
559 self.opener("journal.dirstate", "w").write(ds)
556
560
557 renames = [(self.sjoin("journal"), self.sjoin("undo")),
561 renames = [(self.sjoin("journal"), self.sjoin("undo")),
558 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
562 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
559 tr = transaction.transaction(self.ui.warn, self.sopener,
563 tr = transaction.transaction(self.ui.warn, self.sopener,
560 self.sjoin("journal"),
564 self.sjoin("journal"),
561 aftertrans(renames))
565 aftertrans(renames))
562 self.transhandle = tr
566 self.transhandle = tr
563 return tr
567 return tr
564
568
565 def recover(self):
569 def recover(self):
566 l = self.lock()
570 l = self.lock()
567 if os.path.exists(self.sjoin("journal")):
571 if os.path.exists(self.sjoin("journal")):
568 self.ui.status(_("rolling back interrupted transaction\n"))
572 self.ui.status(_("rolling back interrupted transaction\n"))
569 transaction.rollback(self.sopener, self.sjoin("journal"))
573 transaction.rollback(self.sopener, self.sjoin("journal"))
570 self.reload()
574 self.reload()
571 return True
575 return True
572 else:
576 else:
573 self.ui.warn(_("no interrupted transaction available\n"))
577 self.ui.warn(_("no interrupted transaction available\n"))
574 return False
578 return False
575
579
576 def rollback(self, wlock=None):
580 def rollback(self, wlock=None):
577 if not wlock:
581 if not wlock:
578 wlock = self.wlock()
582 wlock = self.wlock()
579 l = self.lock()
583 l = self.lock()
580 if os.path.exists(self.sjoin("undo")):
584 if os.path.exists(self.sjoin("undo")):
581 self.ui.status(_("rolling back last transaction\n"))
585 self.ui.status(_("rolling back last transaction\n"))
582 transaction.rollback(self.sopener, self.sjoin("undo"))
586 transaction.rollback(self.sopener, self.sjoin("undo"))
583 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
587 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
584 self.reload()
588 self.reload()
585 self.wreload()
589 self.wreload()
586 else:
590 else:
587 self.ui.warn(_("no rollback information available\n"))
591 self.ui.warn(_("no rollback information available\n"))
588
592
589 def wreload(self):
593 def wreload(self):
590 self.dirstate.read()
594 self.dirstate.read()
591
595
592 def reload(self):
596 def reload(self):
593 self.changelog.load()
597 self.changelog.load()
594 self.manifest.load()
598 self.manifest.load()
595 self.tagscache = None
599 self.tagscache = None
596 self.nodetagscache = None
600 self.nodetagscache = None
597
601
598 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
602 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
599 desc=None):
603 desc=None):
600 try:
604 try:
601 l = lock.lock(lockname, 0, releasefn, desc=desc)
605 l = lock.lock(lockname, 0, releasefn, desc=desc)
602 except lock.LockHeld, inst:
606 except lock.LockHeld, inst:
603 if not wait:
607 if not wait:
604 raise
608 raise
605 self.ui.warn(_("waiting for lock on %s held by %r\n") %
609 self.ui.warn(_("waiting for lock on %s held by %r\n") %
606 (desc, inst.locker))
610 (desc, inst.locker))
607 # default to 600 seconds timeout
611 # default to 600 seconds timeout
608 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
612 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
609 releasefn, desc=desc)
613 releasefn, desc=desc)
610 if acquirefn:
614 if acquirefn:
611 acquirefn()
615 acquirefn()
612 return l
616 return l
613
617
614 def lock(self, wait=1):
618 def lock(self, wait=1):
615 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
619 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
616 desc=_('repository %s') % self.origroot)
620 desc=_('repository %s') % self.origroot)
617
621
618 def wlock(self, wait=1):
622 def wlock(self, wait=1):
619 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
623 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
620 self.wreload,
624 self.wreload,
621 desc=_('working directory of %s') % self.origroot)
625 desc=_('working directory of %s') % self.origroot)
622
626
623 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
627 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
624 """
628 """
625 commit an individual file as part of a larger transaction
629 commit an individual file as part of a larger transaction
626 """
630 """
627
631
628 t = self.wread(fn)
632 t = self.wread(fn)
629 fl = self.file(fn)
633 fl = self.file(fn)
630 fp1 = manifest1.get(fn, nullid)
634 fp1 = manifest1.get(fn, nullid)
631 fp2 = manifest2.get(fn, nullid)
635 fp2 = manifest2.get(fn, nullid)
632
636
633 meta = {}
637 meta = {}
634 cp = self.dirstate.copied(fn)
638 cp = self.dirstate.copied(fn)
635 if cp:
639 if cp:
636 # Mark the new revision of this file as a copy of another
640 # Mark the new revision of this file as a copy of another
637 # file. This copy data will effectively act as a parent
641 # file. This copy data will effectively act as a parent
638 # of this new revision. If this is a merge, the first
642 # of this new revision. If this is a merge, the first
639 # parent will be the nullid (meaning "look up the copy data")
643 # parent will be the nullid (meaning "look up the copy data")
640 # and the second one will be the other parent. For example:
644 # and the second one will be the other parent. For example:
641 #
645 #
642 # 0 --- 1 --- 3 rev1 changes file foo
646 # 0 --- 1 --- 3 rev1 changes file foo
643 # \ / rev2 renames foo to bar and changes it
647 # \ / rev2 renames foo to bar and changes it
644 # \- 2 -/ rev3 should have bar with all changes and
648 # \- 2 -/ rev3 should have bar with all changes and
645 # should record that bar descends from
649 # should record that bar descends from
646 # bar in rev2 and foo in rev1
650 # bar in rev2 and foo in rev1
647 #
651 #
648 # this allows this merge to succeed:
652 # this allows this merge to succeed:
649 #
653 #
650 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
654 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
651 # \ / merging rev3 and rev4 should use bar@rev2
655 # \ / merging rev3 and rev4 should use bar@rev2
652 # \- 2 --- 4 as the merge base
656 # \- 2 --- 4 as the merge base
653 #
657 #
654 meta["copy"] = cp
658 meta["copy"] = cp
655 if not manifest2: # not a branch merge
659 if not manifest2: # not a branch merge
656 meta["copyrev"] = hex(manifest1.get(cp, nullid))
660 meta["copyrev"] = hex(manifest1.get(cp, nullid))
657 fp2 = nullid
661 fp2 = nullid
658 elif fp2 != nullid: # copied on remote side
662 elif fp2 != nullid: # copied on remote side
659 meta["copyrev"] = hex(manifest1.get(cp, nullid))
663 meta["copyrev"] = hex(manifest1.get(cp, nullid))
660 elif fp1 != nullid: # copied on local side, reversed
664 elif fp1 != nullid: # copied on local side, reversed
661 meta["copyrev"] = hex(manifest2.get(cp))
665 meta["copyrev"] = hex(manifest2.get(cp))
662 fp2 = fp1
666 fp2 = fp1
663 else: # directory rename
667 else: # directory rename
664 meta["copyrev"] = hex(manifest1.get(cp, nullid))
668 meta["copyrev"] = hex(manifest1.get(cp, nullid))
665 self.ui.debug(_(" %s: copy %s:%s\n") %
669 self.ui.debug(_(" %s: copy %s:%s\n") %
666 (fn, cp, meta["copyrev"]))
670 (fn, cp, meta["copyrev"]))
667 fp1 = nullid
671 fp1 = nullid
668 elif fp2 != nullid:
672 elif fp2 != nullid:
669 # is one parent an ancestor of the other?
673 # is one parent an ancestor of the other?
670 fpa = fl.ancestor(fp1, fp2)
674 fpa = fl.ancestor(fp1, fp2)
671 if fpa == fp1:
675 if fpa == fp1:
672 fp1, fp2 = fp2, nullid
676 fp1, fp2 = fp2, nullid
673 elif fpa == fp2:
677 elif fpa == fp2:
674 fp2 = nullid
678 fp2 = nullid
675
679
676 # is the file unmodified from the parent? report existing entry
680 # is the file unmodified from the parent? report existing entry
677 if fp2 == nullid and not fl.cmp(fp1, t):
681 if fp2 == nullid and not fl.cmp(fp1, t):
678 return fp1
682 return fp1
679
683
680 changelist.append(fn)
684 changelist.append(fn)
681 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
685 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
682
686
683 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
687 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
684 if p1 is None:
688 if p1 is None:
685 p1, p2 = self.dirstate.parents()
689 p1, p2 = self.dirstate.parents()
686 return self.commit(files=files, text=text, user=user, date=date,
690 return self.commit(files=files, text=text, user=user, date=date,
687 p1=p1, p2=p2, wlock=wlock, extra=extra)
691 p1=p1, p2=p2, wlock=wlock, extra=extra)
688
692
689 def commit(self, files=None, text="", user=None, date=None,
693 def commit(self, files=None, text="", user=None, date=None,
690 match=util.always, force=False, lock=None, wlock=None,
694 match=util.always, force=False, lock=None, wlock=None,
691 force_editor=False, p1=None, p2=None, extra={}):
695 force_editor=False, p1=None, p2=None, extra={}):
692
696
693 commit = []
697 commit = []
694 remove = []
698 remove = []
695 changed = []
699 changed = []
696 use_dirstate = (p1 is None) # not rawcommit
700 use_dirstate = (p1 is None) # not rawcommit
697 extra = extra.copy()
701 extra = extra.copy()
698
702
699 if use_dirstate:
703 if use_dirstate:
700 if files:
704 if files:
701 for f in files:
705 for f in files:
702 s = self.dirstate.state(f)
706 s = self.dirstate.state(f)
703 if s in 'nmai':
707 if s in 'nmai':
704 commit.append(f)
708 commit.append(f)
705 elif s == 'r':
709 elif s == 'r':
706 remove.append(f)
710 remove.append(f)
707 else:
711 else:
708 self.ui.warn(_("%s not tracked!\n") % f)
712 self.ui.warn(_("%s not tracked!\n") % f)
709 else:
713 else:
710 changes = self.status(match=match)[:5]
714 changes = self.status(match=match)[:5]
711 modified, added, removed, deleted, unknown = changes
715 modified, added, removed, deleted, unknown = changes
712 commit = modified + added
716 commit = modified + added
713 remove = removed
717 remove = removed
714 else:
718 else:
715 commit = files
719 commit = files
716
720
717 if use_dirstate:
721 if use_dirstate:
718 p1, p2 = self.dirstate.parents()
722 p1, p2 = self.dirstate.parents()
719 update_dirstate = True
723 update_dirstate = True
720 else:
724 else:
721 p1, p2 = p1, p2 or nullid
725 p1, p2 = p1, p2 or nullid
722 update_dirstate = (self.dirstate.parents()[0] == p1)
726 update_dirstate = (self.dirstate.parents()[0] == p1)
723
727
724 c1 = self.changelog.read(p1)
728 c1 = self.changelog.read(p1)
725 c2 = self.changelog.read(p2)
729 c2 = self.changelog.read(p2)
726 m1 = self.manifest.read(c1[0]).copy()
730 m1 = self.manifest.read(c1[0]).copy()
727 m2 = self.manifest.read(c2[0])
731 m2 = self.manifest.read(c2[0])
728
732
729 if use_dirstate:
733 if use_dirstate:
730 branchname = self.workingctx().branch()
734 branchname = self.workingctx().branch()
731 try:
735 try:
732 branchname = branchname.decode('UTF-8').encode('UTF-8')
736 branchname = branchname.decode('UTF-8').encode('UTF-8')
733 except UnicodeDecodeError:
737 except UnicodeDecodeError:
734 raise util.Abort(_('branch name not in UTF-8!'))
738 raise util.Abort(_('branch name not in UTF-8!'))
735 else:
739 else:
736 branchname = ""
740 branchname = ""
737
741
738 if use_dirstate:
742 if use_dirstate:
739 oldname = c1[5].get("branch", "") # stored in UTF-8
743 oldname = c1[5].get("branch", "") # stored in UTF-8
740 if not commit and not remove and not force and p2 == nullid and \
744 if not commit and not remove and not force and p2 == nullid and \
741 branchname == oldname:
745 branchname == oldname:
742 self.ui.status(_("nothing changed\n"))
746 self.ui.status(_("nothing changed\n"))
743 return None
747 return None
744
748
745 xp1 = hex(p1)
749 xp1 = hex(p1)
746 if p2 == nullid: xp2 = ''
750 if p2 == nullid: xp2 = ''
747 else: xp2 = hex(p2)
751 else: xp2 = hex(p2)
748
752
749 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
753 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
750
754
751 if not wlock:
755 if not wlock:
752 wlock = self.wlock()
756 wlock = self.wlock()
753 if not lock:
757 if not lock:
754 lock = self.lock()
758 lock = self.lock()
755 tr = self.transaction()
759 tr = self.transaction()
756
760
757 # check in files
761 # check in files
758 new = {}
762 new = {}
759 linkrev = self.changelog.count()
763 linkrev = self.changelog.count()
760 commit.sort()
764 commit.sort()
761 is_exec = util.execfunc(self.root, m1.execf)
765 is_exec = util.execfunc(self.root, m1.execf)
762 is_link = util.linkfunc(self.root, m1.linkf)
766 is_link = util.linkfunc(self.root, m1.linkf)
763 for f in commit:
767 for f in commit:
764 self.ui.note(f + "\n")
768 self.ui.note(f + "\n")
765 try:
769 try:
766 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
770 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
767 m1.set(f, is_exec(f), is_link(f))
771 m1.set(f, is_exec(f), is_link(f))
768 except (OSError, IOError):
772 except (OSError, IOError):
769 if use_dirstate:
773 if use_dirstate:
770 self.ui.warn(_("trouble committing %s!\n") % f)
774 self.ui.warn(_("trouble committing %s!\n") % f)
771 raise
775 raise
772 else:
776 else:
773 remove.append(f)
777 remove.append(f)
774
778
775 # update manifest
779 # update manifest
776 m1.update(new)
780 m1.update(new)
777 remove.sort()
781 remove.sort()
778 removed = []
782 removed = []
779
783
780 for f in remove:
784 for f in remove:
781 if f in m1:
785 if f in m1:
782 del m1[f]
786 del m1[f]
783 removed.append(f)
787 removed.append(f)
784 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
788 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
785
789
786 # add changeset
790 # add changeset
787 new = new.keys()
791 new = new.keys()
788 new.sort()
792 new.sort()
789
793
790 user = user or self.ui.username()
794 user = user or self.ui.username()
791 if not text or force_editor:
795 if not text or force_editor:
792 edittext = []
796 edittext = []
793 if text:
797 if text:
794 edittext.append(text)
798 edittext.append(text)
795 edittext.append("")
799 edittext.append("")
796 edittext.append("HG: user: %s" % user)
800 edittext.append("HG: user: %s" % user)
797 if p2 != nullid:
801 if p2 != nullid:
798 edittext.append("HG: branch merge")
802 edittext.append("HG: branch merge")
799 if branchname:
803 if branchname:
800 edittext.append("HG: branch %s" % util.tolocal(branchname))
804 edittext.append("HG: branch %s" % util.tolocal(branchname))
801 edittext.extend(["HG: changed %s" % f for f in changed])
805 edittext.extend(["HG: changed %s" % f for f in changed])
802 edittext.extend(["HG: removed %s" % f for f in removed])
806 edittext.extend(["HG: removed %s" % f for f in removed])
803 if not changed and not remove:
807 if not changed and not remove:
804 edittext.append("HG: no files changed")
808 edittext.append("HG: no files changed")
805 edittext.append("")
809 edittext.append("")
806 # run editor in the repository root
810 # run editor in the repository root
807 olddir = os.getcwd()
811 olddir = os.getcwd()
808 os.chdir(self.root)
812 os.chdir(self.root)
809 text = self.ui.edit("\n".join(edittext), user)
813 text = self.ui.edit("\n".join(edittext), user)
810 os.chdir(olddir)
814 os.chdir(olddir)
811
815
812 lines = [line.rstrip() for line in text.rstrip().splitlines()]
816 lines = [line.rstrip() for line in text.rstrip().splitlines()]
813 while lines and not lines[0]:
817 while lines and not lines[0]:
814 del lines[0]
818 del lines[0]
815 if not lines:
819 if not lines:
816 return None
820 return None
817 text = '\n'.join(lines)
821 text = '\n'.join(lines)
818 if branchname:
822 if branchname:
819 extra["branch"] = branchname
823 extra["branch"] = branchname
820 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
824 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
821 user, date, extra)
825 user, date, extra)
822 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
826 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
823 parent2=xp2)
827 parent2=xp2)
824 tr.close()
828 tr.close()
825
829
826 if self.branchcache and "branch" in extra:
830 if self.branchcache and "branch" in extra:
827 self.branchcache[util.tolocal(extra["branch"])] = n
831 self.branchcache[util.tolocal(extra["branch"])] = n
828
832
829 if use_dirstate or update_dirstate:
833 if use_dirstate or update_dirstate:
830 self.dirstate.setparents(n)
834 self.dirstate.setparents(n)
831 if use_dirstate:
835 if use_dirstate:
832 self.dirstate.update(new, "n")
836 self.dirstate.update(new, "n")
833 self.dirstate.forget(removed)
837 self.dirstate.forget(removed)
834
838
835 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
839 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
836 return n
840 return n
837
841
838 def walk(self, node=None, files=[], match=util.always, badmatch=None):
842 def walk(self, node=None, files=[], match=util.always, badmatch=None):
839 '''
843 '''
840 walk recursively through the directory tree or a given
844 walk recursively through the directory tree or a given
841 changeset, finding all files matched by the match
845 changeset, finding all files matched by the match
842 function
846 function
843
847
844 results are yielded in a tuple (src, filename), where src
848 results are yielded in a tuple (src, filename), where src
845 is one of:
849 is one of:
846 'f' the file was found in the directory tree
850 'f' the file was found in the directory tree
847 'm' the file was only in the dirstate and not in the tree
851 'm' the file was only in the dirstate and not in the tree
848 'b' file was not found and matched badmatch
852 'b' file was not found and matched badmatch
849 '''
853 '''
850
854
851 if node:
855 if node:
852 fdict = dict.fromkeys(files)
856 fdict = dict.fromkeys(files)
853 for fn in self.manifest.read(self.changelog.read(node)[0]):
857 for fn in self.manifest.read(self.changelog.read(node)[0]):
854 for ffn in fdict:
858 for ffn in fdict:
855 # match if the file is the exact name or a directory
859 # match if the file is the exact name or a directory
856 if ffn == fn or fn.startswith("%s/" % ffn):
860 if ffn == fn or fn.startswith("%s/" % ffn):
857 del fdict[ffn]
861 del fdict[ffn]
858 break
862 break
859 if match(fn):
863 if match(fn):
860 yield 'm', fn
864 yield 'm', fn
861 for fn in fdict:
865 for fn in fdict:
862 if badmatch and badmatch(fn):
866 if badmatch and badmatch(fn):
863 if match(fn):
867 if match(fn):
864 yield 'b', fn
868 yield 'b', fn
865 else:
869 else:
866 self.ui.warn(_('%s: No such file in rev %s\n') % (
870 self.ui.warn(_('%s: No such file in rev %s\n') % (
867 util.pathto(self.getcwd(), fn), short(node)))
871 util.pathto(self.getcwd(), fn), short(node)))
868 else:
872 else:
869 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
873 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 yield src, fn
874 yield src, fn
871
875
872 def status(self, node1=None, node2=None, files=[], match=util.always,
876 def status(self, node1=None, node2=None, files=[], match=util.always,
873 wlock=None, list_ignored=False, list_clean=False):
877 wlock=None, list_ignored=False, list_clean=False):
874 """return status of files between two nodes or node and working directory
878 """return status of files between two nodes or node and working directory
875
879
876 If node1 is None, use the first dirstate parent instead.
880 If node1 is None, use the first dirstate parent instead.
877 If node2 is None, compare node1 with working directory.
881 If node2 is None, compare node1 with working directory.
878 """
882 """
879
883
880 def fcmp(fn, mf):
884 def fcmp(fn, mf):
881 t1 = self.wread(fn)
885 t1 = self.wread(fn)
882 return self.file(fn).cmp(mf.get(fn, nullid), t1)
886 return self.file(fn).cmp(mf.get(fn, nullid), t1)
883
887
884 def mfmatches(node):
888 def mfmatches(node):
885 change = self.changelog.read(node)
889 change = self.changelog.read(node)
886 mf = self.manifest.read(change[0]).copy()
890 mf = self.manifest.read(change[0]).copy()
887 for fn in mf.keys():
891 for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

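    # Editorial note (not in the original source): the seven sorted lists
    # returned above classify every inspected file into exactly one bucket,
    # so a caller can unpack them directly, e.g.:
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_ignored=True, list_clean=True)
    # (assuming, as Mercurial's callers do, that this method is exposed as
    # the repository's status()).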
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            islink = os.path.islink(p)
            if not islink and not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not islink and not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

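    # Editorial note (not in the original source) on the one-letter dirstate
    # states used throughout these methods, as inferred from this file:
    # 'n' normal (tracked), 'a' scheduled for addition, 'r' scheduled for
    # removal, '?' untracked.  Hence state(f) in 'an' above means "already
    # added or tracked", and update([f], "a") marks f as added.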
    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn(_("%s not removed!\n") % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

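    # Editorial example (not in the original source): heads() sorts on the
    # negated revision number so a plain ascending sort yields newest-first,
    # e.g. head revs [2, 5, 3] become [(-5, n), (-3, n), (-2, n)] and come
    # back as revisions 5, 3, 2.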
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

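    # Editorial example (not in the original source): for a (top, bottom)
    # pair, between() records the nodes 1, 2, 4, 8, ... parent-steps below
    # top, i.e. an exponentially sparser sample of the chain.  The branch
    # narrowing in findincoming() below searches inside these gaps.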
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        both self and remote but where no child exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

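        # Editorial sketch (not in the original source) of the narrowing
        # below: 'l' holds nodes sampled at distances 1, 2, 4, ... from the
        # head of an incomplete branch.  We walk the samples until we hit a
        # node we already know; if the gap back to the last unknown sample
        # is still wider than one changeset, the (unknown, known) pair is
        # re-queued, so each 'between' query shrinks the candidate span.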
        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

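    # Editorial sketch (not in the original source) of the pruning above:
    # with local history  null -> a -> b -> c  and base = {b}, everything
    # reachable back from b (b, a, null) is deleted from 'remain', leaving
    # {c}; both of c's parents are pruned, so subset == [c], the root of
    # the outgoing region.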
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

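    # Editorial example (not in the original source) for the check above:
    # if the remote has heads {A} and none of our outgoing heads descends
    # from A, then A survives into 'newheads' alongside our own heads, so
    # len(newheads) > len(remote_heads) and the push aborts unless forced.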
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

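        # Editorial usage note (not in the original source): the closure is
        # intended for list.sort(), e.g.
        #   some_nodes.sort(cmp_by_rev_func(cl))
        # which orders changelog nodes by revision number -- both the
        # cheapest read order on disk and a valid topological order.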
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

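        # Editorial example (not in the original source): given a linear
        # revlog n1 -> n2 -> n3 with hasset = {n3}, the walk above pulls n2
        # and n1 into hasset, and the final loop drops all three from
        # msngset -- nothing the recipient provably has is re-sent.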
        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

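        # Editorial note (not in the original source): the fast path above
        # works because a manifest delta decodes to lines of the form
        #   <filename>\0<40 hex digits of filenode>
        # (possibly followed by a flag character, hence the [:40] slice), so
        # when manifests are visited in consecutive revision order the delta
        # alone names every filenode that changed, and the full manifest
        # text never has to be parsed.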
        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
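    # Editorial summary (not in the original source) of the stream built by
    # gengroup() above: one changelog group, then one manifest group, then
    # for each changed file a name chunk followed by that file's revlog
    # group, terminated by changegroup.closechunk().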

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

1736 def addchangegroup(self, source, srctype, url):
1740 def addchangegroup(self, source, srctype, url):
1737 """add changegroup to repo.
1741 """add changegroup to repo.
1738
1742
1739 return values:
1743 return values:
1740 - nothing changed or no source: 0
1744 - nothing changed or no source: 0
1741 - more heads than before: 1+added heads (2..n)
1745 - more heads than before: 1+added heads (2..n)
1742 - less heads than before: -1-removed heads (-2..-n)
1746 - less heads than before: -1-removed heads (-2..-n)
1743 - number of heads stays the same: 1
1747 - number of heads stays the same: 1
1744 """
1748 """
1745 def csmap(x):
1749 def csmap(x):
1746 self.ui.debug(_("add changeset %s\n") % short(x))
1750 self.ui.debug(_("add changeset %s\n") % short(x))
1747 return cl.count()
1751 return cl.count()
1748
1752
1749 def revmap(x):
1753 def revmap(x):
1750 return cl.rev(x)
1754 return cl.rev(x)
1751
1755
1752 if not source:
1756 if not source:
1753 return 0
1757 return 0
1754
1758
1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1759 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1756
1760
1757 changesets = files = revisions = 0
1761 changesets = files = revisions = 0
1758
1762
1759 tr = self.transaction()
1763 tr = self.transaction()
1760
1764
1761 # write changelog data to temp files so concurrent readers will not see
1765 # write changelog data to temp files so concurrent readers will not see
1762 # inconsistent view
1766 # inconsistent view
1763 cl = None
1767 cl = None
1764 try:
1768 try:
1765 cl = appendfile.appendchangelog(self.sopener,
1769 cl = appendfile.appendchangelog(self.sopener,
1766 self.changelog.version)
1770 self.changelog.version)
1767
1771
1768 oldheads = len(cl.heads())
1772 oldheads = len(cl.heads())
1769
1773
1770 # pull off the changeset group
1774 # pull off the changeset group
1771 self.ui.status(_("adding changesets\n"))
1775 self.ui.status(_("adding changesets\n"))
1772 cor = cl.count() - 1
1776 cor = cl.count() - 1
1773 chunkiter = changegroup.chunkiter(source)
1777 chunkiter = changegroup.chunkiter(source)
1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1778 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1775 raise util.Abort(_("received changelog group is empty"))
1779 raise util.Abort(_("received changelog group is empty"))
1776 cnr = cl.count() - 1
1780 cnr = cl.count() - 1
1777 changesets = cnr - cor
1781 changesets = cnr - cor
1778
1782
1779 # pull off the manifest group
1783 # pull off the manifest group
1780 self.ui.status(_("adding manifests\n"))
1784 self.ui.status(_("adding manifests\n"))
1781 chunkiter = changegroup.chunkiter(source)
1785 chunkiter = changegroup.chunkiter(source)
1782 # no need to check for empty manifest group here:
1786 # no need to check for empty manifest group here:
1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1787 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1784 # no new manifest will be created and the manifest group will
1788 # no new manifest will be created and the manifest group will
1785 # be empty during the pull
1789 # be empty during the pull
1786 self.manifest.addgroup(chunkiter, revmap, tr)
1790 self.manifest.addgroup(chunkiter, revmap, tr)
1787
1791
1788 # process the files
1792 # process the files
1789 self.ui.status(_("adding file changes\n"))
1793 self.ui.status(_("adding file changes\n"))
1790 while 1:
1794 while 1:
1791 f = changegroup.getchunk(source)
1795 f = changegroup.getchunk(source)
1792 if not f:
1796 if not f:
1793 break
1797 break
1794 self.ui.debug(_("adding %s revisions\n") % f)
1798 self.ui.debug(_("adding %s revisions\n") % f)
1795 fl = self.file(f)
1799 fl = self.file(f)
1796 o = fl.count()
1800 o = fl.count()
1797 chunkiter = changegroup.chunkiter(source)
1801 chunkiter = changegroup.chunkiter(source)
1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1802 if fl.addgroup(chunkiter, revmap, tr) is None:
1799 raise util.Abort(_("received file revlog group is empty"))
1803 raise util.Abort(_("received file revlog group is empty"))
1800 revisions += fl.count() - o
1804 revisions += fl.count() - o
1801 files += 1
1805 files += 1
1802
1806
1803 cl.writedata()
1807 cl.writedata()
1804 finally:
1808 finally:
1805 if cl:
1809 if cl:
1806 cl.cleanup()
1810 cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)
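
        # hook ordering matters: 'pretxnchangegroup' fires before tr.close(),
        # so a failing hook (throw=True) still rolls the transaction back,
        # while 'changegroup' and 'incoming' only fire once the transaction
        # has been committed.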

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
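
        # a return of 0 would be indistinguishable from "nothing happened",
        # hence the bias: +1 means changesets arrived without adding heads,
        # larger positive values mean new heads appeared, and negative
        # values mean heads went away.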


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
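
        # as parsed below, the stream_out wire format is: a status line
        # ('0' on success), a '<file count> <byte count>' line, then for
        # each file a '<name>\0<size>' header followed by exactly <size>
        # bytes of raw store data.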
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1
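
        # like addchangegroup, this deliberately never returns 0: heads + 1
        # lets callers treat any positive result as a successful transfer.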

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
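
        # illustrative sketch only (the peer object and entry point are
        # assumptions, not part of this file): a caller preferring a
        # streaming clone with a pull fallback might write
        #
        #   repo = localrepository(ui, path, create=1)
        #   result = repo.clone(remote, stream=True)
        #
        # clone() degrades to pull() on its own when specific heads are
        # requested or the server lacks the 'stream' capability.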

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
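
# aftertrans returns a plain closure over copied tuples instead of a bound
# method, so the transaction never holds a reference back to the repository;
# presumably this is what keeps __del__-based cleanup working, since a
# reference cycle through the repo would prevent it in older CPython.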

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True

@@ -1,16 +1,22 @@
 adding bar
 adding foo
 adding bomb
 adding a.c
 adding dir/a.o
 adding dir/b.o
 M dir/b.o
 ! a.c
 ! dir/a.o
 ? .hgignore
 a.c: unsupported file type (type is fifo)
 ! a.c
 # test absolute path through symlink outside repo
 A f
 # try symlink outside repo to file inside
 abort: ../z not under root
+# try cloning symlink in a subdir
+1. commit a symlink
+? a/b/c/demo
+adding a/b/c/demo
+2. clone it
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved