##// END OF EJS Templates
branch.cache: silently ignore I/O and OS errors
Matt Mackall -
r4415:1a63b44f default
parent child Browse files
Show More
@@ -1,2016 +1,2020 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19 supported = ('revlogv1', 'store')
19 supported = ('revlogv1', 'store')
20
20
21 def __del__(self):
21 def __del__(self):
22 self.transhandle = None
22 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 if not path:
25 if not path:
26 p = os.getcwd()
26 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
28 oldp = p
29 p = os.path.dirname(p)
29 p = os.path.dirname(p)
30 if p == oldp:
30 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
32 " here (.hg not found)"))
33 path = p
33 path = p
34
34
35 self.root = os.path.realpath(path)
35 self.root = os.path.realpath(path)
36 self.path = os.path.join(self.root, ".hg")
36 self.path = os.path.join(self.root, ".hg")
37 self.origroot = path
37 self.origroot = path
38 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 v = self.ui.configrevlog()
91 v = self.ui.configrevlog()
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 fl = v.get('flags', None)
94 fl = v.get('flags', None)
95 flags = 0
95 flags = 0
96 if fl != None:
96 if fl != None:
97 for x in fl.split():
97 for x in fl.split():
98 flags |= revlog.flagstr(x)
98 flags |= revlog.flagstr(x)
99 elif self.revlogv1:
99 elif self.revlogv1:
100 flags = revlog.REVLOG_DEFAULT_FLAGS
100 flags = revlog.REVLOG_DEFAULT_FLAGS
101
101
102 v = self.revlogversion | flags
102 v = self.revlogversion | flags
103 self.manifest = manifest.manifest(self.sopener, v)
103 self.manifest = manifest.manifest(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
105
105
106 fallback = self.ui.config('ui', 'fallbackencoding')
106 fallback = self.ui.config('ui', 'fallbackencoding')
107 if fallback:
107 if fallback:
108 util._fallbackencoding = fallback
108 util._fallbackencoding = fallback
109
109
110 # the changelog might not have the inline index flag
110 # the changelog might not have the inline index flag
111 # on. If the format of the changelog is the same as found in
111 # on. If the format of the changelog is the same as found in
112 # .hgrc, apply any flags found in the .hgrc as well.
112 # .hgrc, apply any flags found in the .hgrc as well.
113 # Otherwise, just version from the changelog
113 # Otherwise, just version from the changelog
114 v = self.changelog.version
114 v = self.changelog.version
115 if v == self.revlogversion:
115 if v == self.revlogversion:
116 v |= flags
116 v |= flags
117 self.revlogversion = v
117 self.revlogversion = v
118
118
119 self.tagscache = None
119 self.tagscache = None
120 self.branchcache = None
120 self.branchcache = None
121 self.nodetagscache = None
121 self.nodetagscache = None
122 self.encodepats = None
122 self.encodepats = None
123 self.decodepats = None
123 self.decodepats = None
124 self.transhandle = None
124 self.transhandle = None
125
125
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def url(self):
128 def url(self):
129 return 'file:' + self.root
129 return 'file:' + self.root
130
130
131 def hook(self, name, throw=False, **args):
131 def hook(self, name, throw=False, **args):
132 def callhook(hname, funcname):
132 def callhook(hname, funcname):
133 '''call python hook. hook is callable object, looked up as
133 '''call python hook. hook is callable object, looked up as
134 name in python module. if callable returns "true", hook
134 name in python module. if callable returns "true", hook
135 fails, else passes. if hook raises exception, treated as
135 fails, else passes. if hook raises exception, treated as
136 hook failure. exception propagates if throw is "true".
136 hook failure. exception propagates if throw is "true".
137
137
138 reason for "true" meaning "hook failed" is so that
138 reason for "true" meaning "hook failed" is so that
139 unmodified commands (e.g. mercurial.commands.update) can
139 unmodified commands (e.g. mercurial.commands.update) can
140 be run as hooks without wrappers to convert return values.'''
140 be run as hooks without wrappers to convert return values.'''
141
141
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 d = funcname.rfind('.')
143 d = funcname.rfind('.')
144 if d == -1:
144 if d == -1:
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 % (hname, funcname))
146 % (hname, funcname))
147 modname = funcname[:d]
147 modname = funcname[:d]
148 try:
148 try:
149 obj = __import__(modname)
149 obj = __import__(modname)
150 except ImportError:
150 except ImportError:
151 try:
151 try:
152 # extensions are loaded with hgext_ prefix
152 # extensions are loaded with hgext_ prefix
153 obj = __import__("hgext_%s" % modname)
153 obj = __import__("hgext_%s" % modname)
154 except ImportError:
154 except ImportError:
155 raise util.Abort(_('%s hook is invalid '
155 raise util.Abort(_('%s hook is invalid '
156 '(import of "%s" failed)') %
156 '(import of "%s" failed)') %
157 (hname, modname))
157 (hname, modname))
158 try:
158 try:
159 for p in funcname.split('.')[1:]:
159 for p in funcname.split('.')[1:]:
160 obj = getattr(obj, p)
160 obj = getattr(obj, p)
161 except AttributeError, err:
161 except AttributeError, err:
162 raise util.Abort(_('%s hook is invalid '
162 raise util.Abort(_('%s hook is invalid '
163 '("%s" is not defined)') %
163 '("%s" is not defined)') %
164 (hname, funcname))
164 (hname, funcname))
165 if not callable(obj):
165 if not callable(obj):
166 raise util.Abort(_('%s hook is invalid '
166 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not callable)') %
167 '("%s" is not callable)') %
168 (hname, funcname))
168 (hname, funcname))
169 try:
169 try:
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 except (KeyboardInterrupt, util.SignalInterrupt):
171 except (KeyboardInterrupt, util.SignalInterrupt):
172 raise
172 raise
173 except Exception, exc:
173 except Exception, exc:
174 if isinstance(exc, util.Abort):
174 if isinstance(exc, util.Abort):
175 self.ui.warn(_('error: %s hook failed: %s\n') %
175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 (hname, exc.args[0]))
176 (hname, exc.args[0]))
177 else:
177 else:
178 self.ui.warn(_('error: %s hook raised an exception: '
178 self.ui.warn(_('error: %s hook raised an exception: '
179 '%s\n') % (hname, exc))
179 '%s\n') % (hname, exc))
180 if throw:
180 if throw:
181 raise
181 raise
182 self.ui.print_exc()
182 self.ui.print_exc()
183 return True
183 return True
184 if r:
184 if r:
185 if throw:
185 if throw:
186 raise util.Abort(_('%s hook failed') % hname)
186 raise util.Abort(_('%s hook failed') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 return r
188 return r
189
189
190 def runhook(name, cmd):
190 def runhook(name, cmd):
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 r = util.system(cmd, environ=env, cwd=self.root)
193 r = util.system(cmd, environ=env, cwd=self.root)
194 if r:
194 if r:
195 desc, r = util.explain_exit(r)
195 desc, r = util.explain_exit(r)
196 if throw:
196 if throw:
197 raise util.Abort(_('%s hook %s') % (name, desc))
197 raise util.Abort(_('%s hook %s') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 return r
199 return r
200
200
201 r = False
201 r = False
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 if hname.split(".", 1)[0] == name and cmd]
203 if hname.split(".", 1)[0] == name and cmd]
204 hooks.sort()
204 hooks.sort()
205 for hname, cmd in hooks:
205 for hname, cmd in hooks:
206 if cmd.startswith('python:'):
206 if cmd.startswith('python:'):
207 r = callhook(hname, cmd[7:].strip()) or r
207 r = callhook(hname, cmd[7:].strip()) or r
208 else:
208 else:
209 r = runhook(hname, cmd) or r
209 r = runhook(hname, cmd) or r
210 return r
210 return r
211
211
212 tag_disallowed = ':\r\n'
212 tag_disallowed = ':\r\n'
213
213
214 def tag(self, name, node, message, local, user, date):
214 def tag(self, name, node, message, local, user, date):
215 '''tag a revision with a symbolic name.
215 '''tag a revision with a symbolic name.
216
216
217 if local is True, the tag is stored in a per-repository file.
217 if local is True, the tag is stored in a per-repository file.
218 otherwise, it is stored in the .hgtags file, and a new
218 otherwise, it is stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tag in non-version-controlled file
223 local: whether to store tag in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for c in self.tag_disallowed:
232 for c in self.tag_disallowed:
233 if c in name:
233 if c in name:
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235
235
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237
237
238 if local:
238 if local:
239 # local tags are stored in the current charset
239 # local tags are stored in the current charset
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.hook('tag', node=hex(node), tag=name, local=local)
241 self.hook('tag', node=hex(node), tag=name, local=local)
242 return
242 return
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 # committed tags are stored in UTF-8
249 # committed tags are stored in UTF-8
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 self.wfile('.hgtags', 'ab').write(line)
251 self.wfile('.hgtags', 'ab').write(line)
252 if self.dirstate.state('.hgtags') == '?':
252 if self.dirstate.state('.hgtags') == '?':
253 self.add(['.hgtags'])
253 self.add(['.hgtags'])
254
254
255 self.commit(['.hgtags'], message, user, date)
255 self.commit(['.hgtags'], message, user, date)
256 self.hook('tag', node=hex(node), tag=name, local=local)
256 self.hook('tag', node=hex(node), tag=name, local=local)
257
257
258 def tags(self):
258 def tags(self):
259 '''return a mapping of tag to node'''
259 '''return a mapping of tag to node'''
260 if self.tagscache:
260 if self.tagscache:
261 return self.tagscache
261 return self.tagscache
262
262
263 globaltags = {}
263 globaltags = {}
264
264
265 def readtags(lines, fn):
265 def readtags(lines, fn):
266 filetags = {}
266 filetags = {}
267 count = 0
267 count = 0
268
268
269 def warn(msg):
269 def warn(msg):
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271
271
272 for l in lines:
272 for l in lines:
273 count += 1
273 count += 1
274 if not l:
274 if not l:
275 continue
275 continue
276 s = l.split(" ", 1)
276 s = l.split(" ", 1)
277 if len(s) != 2:
277 if len(s) != 2:
278 warn(_("cannot parse entry"))
278 warn(_("cannot parse entry"))
279 continue
279 continue
280 node, key = s
280 node, key = s
281 key = util.tolocal(key.strip()) # stored in UTF-8
281 key = util.tolocal(key.strip()) # stored in UTF-8
282 try:
282 try:
283 bin_n = bin(node)
283 bin_n = bin(node)
284 except TypeError:
284 except TypeError:
285 warn(_("node '%s' is not well formed") % node)
285 warn(_("node '%s' is not well formed") % node)
286 continue
286 continue
287 if bin_n not in self.changelog.nodemap:
287 if bin_n not in self.changelog.nodemap:
288 warn(_("tag '%s' refers to unknown node") % key)
288 warn(_("tag '%s' refers to unknown node") % key)
289 continue
289 continue
290
290
291 h = []
291 h = []
292 if key in filetags:
292 if key in filetags:
293 n, h = filetags[key]
293 n, h = filetags[key]
294 h.append(n)
294 h.append(n)
295 filetags[key] = (bin_n, h)
295 filetags[key] = (bin_n, h)
296
296
297 for k,nh in filetags.items():
297 for k,nh in filetags.items():
298 if k not in globaltags:
298 if k not in globaltags:
299 globaltags[k] = nh
299 globaltags[k] = nh
300 continue
300 continue
301 # we prefer the global tag if:
301 # we prefer the global tag if:
302 # it supercedes us OR
302 # it supercedes us OR
303 # mutual supercedes and it has a higher rank
303 # mutual supercedes and it has a higher rank
304 # otherwise we win because we're tip-most
304 # otherwise we win because we're tip-most
305 an, ah = nh
305 an, ah = nh
306 bn, bh = globaltags[k]
306 bn, bh = globaltags[k]
307 if bn != an and an in bh and \
307 if bn != an and an in bh and \
308 (bn not in ah or len(bh) > len(ah)):
308 (bn not in ah or len(bh) > len(ah)):
309 an = bn
309 an = bn
310 ah.append([n for n in bh if n not in ah])
310 ah.append([n for n in bh if n not in ah])
311 globaltags[k] = an, ah
311 globaltags[k] = an, ah
312
312
313 # read the tags file from each head, ending with the tip
313 # read the tags file from each head, ending with the tip
314 f = None
314 f = None
315 for rev, node, fnode in self._hgtagsnodes():
315 for rev, node, fnode in self._hgtagsnodes():
316 f = (f and f.filectx(fnode) or
316 f = (f and f.filectx(fnode) or
317 self.filectx('.hgtags', fileid=fnode))
317 self.filectx('.hgtags', fileid=fnode))
318 readtags(f.data().splitlines(), f)
318 readtags(f.data().splitlines(), f)
319
319
320 try:
320 try:
321 data = util.fromlocal(self.opener("localtags").read())
321 data = util.fromlocal(self.opener("localtags").read())
322 # localtags are stored in the local character set
322 # localtags are stored in the local character set
323 # while the internal tag table is stored in UTF-8
323 # while the internal tag table is stored in UTF-8
324 readtags(data.splitlines(), "localtags")
324 readtags(data.splitlines(), "localtags")
325 except IOError:
325 except IOError:
326 pass
326 pass
327
327
328 self.tagscache = {}
328 self.tagscache = {}
329 for k,nh in globaltags.items():
329 for k,nh in globaltags.items():
330 n = nh[0]
330 n = nh[0]
331 if n != nullid:
331 if n != nullid:
332 self.tagscache[k] = n
332 self.tagscache[k] = n
333 self.tagscache['tip'] = self.changelog.tip()
333 self.tagscache['tip'] = self.changelog.tip()
334
334
335 return self.tagscache
335 return self.tagscache
336
336
337 def _hgtagsnodes(self):
337 def _hgtagsnodes(self):
338 heads = self.heads()
338 heads = self.heads()
339 heads.reverse()
339 heads.reverse()
340 last = {}
340 last = {}
341 ret = []
341 ret = []
342 for node in heads:
342 for node in heads:
343 c = self.changectx(node)
343 c = self.changectx(node)
344 rev = c.rev()
344 rev = c.rev()
345 try:
345 try:
346 fnode = c.filenode('.hgtags')
346 fnode = c.filenode('.hgtags')
347 except repo.LookupError:
347 except repo.LookupError:
348 continue
348 continue
349 ret.append((rev, node, fnode))
349 ret.append((rev, node, fnode))
350 if fnode in last:
350 if fnode in last:
351 ret[last[fnode]] = None
351 ret[last[fnode]] = None
352 last[fnode] = len(ret) - 1
352 last[fnode] = len(ret) - 1
353 return [item for item in ret if item]
353 return [item for item in ret if item]
354
354
355 def tagslist(self):
355 def tagslist(self):
356 '''return a list of tags ordered by revision'''
356 '''return a list of tags ordered by revision'''
357 l = []
357 l = []
358 for t, n in self.tags().items():
358 for t, n in self.tags().items():
359 try:
359 try:
360 r = self.changelog.rev(n)
360 r = self.changelog.rev(n)
361 except:
361 except:
362 r = -2 # sort to the beginning of the list if unknown
362 r = -2 # sort to the beginning of the list if unknown
363 l.append((r, t, n))
363 l.append((r, t, n))
364 l.sort()
364 l.sort()
365 return [(t, n) for r, t, n in l]
365 return [(t, n) for r, t, n in l]
366
366
367 def nodetags(self, node):
367 def nodetags(self, node):
368 '''return the tags associated with a node'''
368 '''return the tags associated with a node'''
369 if not self.nodetagscache:
369 if not self.nodetagscache:
370 self.nodetagscache = {}
370 self.nodetagscache = {}
371 for t, n in self.tags().items():
371 for t, n in self.tags().items():
372 self.nodetagscache.setdefault(n, []).append(t)
372 self.nodetagscache.setdefault(n, []).append(t)
373 return self.nodetagscache.get(node, [])
373 return self.nodetagscache.get(node, [])
374
374
375 def _branchtags(self):
375 def _branchtags(self):
376 partial, last, lrev = self._readbranchcache()
376 partial, last, lrev = self._readbranchcache()
377
377
378 tiprev = self.changelog.count() - 1
378 tiprev = self.changelog.count() - 1
379 if lrev != tiprev:
379 if lrev != tiprev:
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382
382
383 return partial
383 return partial
384
384
385 def branchtags(self):
385 def branchtags(self):
386 if self.branchcache is not None:
386 if self.branchcache is not None:
387 return self.branchcache
387 return self.branchcache
388
388
389 self.branchcache = {} # avoid recursion in changectx
389 self.branchcache = {} # avoid recursion in changectx
390 partial = self._branchtags()
390 partial = self._branchtags()
391
391
392 # the branch cache is stored on disk as UTF-8, but in the local
392 # the branch cache is stored on disk as UTF-8, but in the local
393 # charset internally
393 # charset internally
394 for k, v in partial.items():
394 for k, v in partial.items():
395 self.branchcache[util.tolocal(k)] = v
395 self.branchcache[util.tolocal(k)] = v
396 return self.branchcache
396 return self.branchcache
397
397
398 def _readbranchcache(self):
398 def _readbranchcache(self):
399 partial = {}
399 partial = {}
400 try:
400 try:
401 f = self.opener("branch.cache")
401 f = self.opener("branch.cache")
402 lines = f.read().split('\n')
402 lines = f.read().split('\n')
403 f.close()
403 f.close()
404 except (IOError, OSError):
405 return {}, nullid, nullrev
406
407 try:
404 last, lrev = lines.pop(0).split(" ", 1)
408 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
409 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
410 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
411 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
412 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
413 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
414 for l in lines:
411 if not l: continue
415 if not l: continue
412 node, label = l.split(" ", 1)
416 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
417 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
418 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
419 raise
416 except Exception, inst:
420 except Exception, inst:
417 if self.ui.debugflag:
421 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
422 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
423 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
424 return partial, last, lrev
421
425
422 def _writebranchcache(self, branches, tip, tiprev):
426 def _writebranchcache(self, branches, tip, tiprev):
423 try:
427 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
428 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
429 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
430 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
431 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
432 f.rename()
429 except IOError:
433 except (IOError, OSError):
430 pass
434 pass
431
435
432 def _updatebranchcache(self, partial, start, end):
436 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
437 for r in xrange(start, end):
434 c = self.changectx(r)
438 c = self.changectx(r)
435 b = c.branch()
439 b = c.branch()
436 partial[b] = c.node()
440 partial[b] = c.node()
437
441
438 def lookup(self, key):
442 def lookup(self, key):
439 if key == '.':
443 if key == '.':
440 key = self.dirstate.parents()[0]
444 key = self.dirstate.parents()[0]
441 if key == nullid:
445 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
446 raise repo.RepoError(_("no revision checked out"))
443 elif key == 'null':
447 elif key == 'null':
444 return nullid
448 return nullid
445 n = self.changelog._match(key)
449 n = self.changelog._match(key)
446 if n:
450 if n:
447 return n
451 return n
448 if key in self.tags():
452 if key in self.tags():
449 return self.tags()[key]
453 return self.tags()[key]
450 if key in self.branchtags():
454 if key in self.branchtags():
451 return self.branchtags()[key]
455 return self.branchtags()[key]
452 n = self.changelog._partialmatch(key)
456 n = self.changelog._partialmatch(key)
453 if n:
457 if n:
454 return n
458 return n
455 raise repo.RepoError(_("unknown revision '%s'") % key)
459 raise repo.RepoError(_("unknown revision '%s'") % key)
456
460
457 def dev(self):
461 def dev(self):
458 return os.lstat(self.path).st_dev
462 return os.lstat(self.path).st_dev
459
463
460 def local(self):
464 def local(self):
461 return True
465 return True
462
466
463 def join(self, f):
467 def join(self, f):
464 return os.path.join(self.path, f)
468 return os.path.join(self.path, f)
465
469
466 def sjoin(self, f):
470 def sjoin(self, f):
467 f = self.encodefn(f)
471 f = self.encodefn(f)
468 return os.path.join(self.spath, f)
472 return os.path.join(self.spath, f)
469
473
470 def wjoin(self, f):
474 def wjoin(self, f):
471 return os.path.join(self.root, f)
475 return os.path.join(self.root, f)
472
476
473 def file(self, f):
477 def file(self, f):
474 if f[0] == '/':
478 if f[0] == '/':
475 f = f[1:]
479 f = f[1:]
476 return filelog.filelog(self.sopener, f, self.revlogversion)
480 return filelog.filelog(self.sopener, f, self.revlogversion)
477
481
478 def changectx(self, changeid=None):
482 def changectx(self, changeid=None):
479 return context.changectx(self, changeid)
483 return context.changectx(self, changeid)
480
484
481 def workingctx(self):
485 def workingctx(self):
482 return context.workingctx(self)
486 return context.workingctx(self)
483
487
484 def parents(self, changeid=None):
488 def parents(self, changeid=None):
485 '''
489 '''
486 get list of changectxs for parents of changeid or working directory
490 get list of changectxs for parents of changeid or working directory
487 '''
491 '''
488 if changeid is None:
492 if changeid is None:
489 pl = self.dirstate.parents()
493 pl = self.dirstate.parents()
490 else:
494 else:
491 n = self.changelog.lookup(changeid)
495 n = self.changelog.lookup(changeid)
492 pl = self.changelog.parents(n)
496 pl = self.changelog.parents(n)
493 if pl[1] == nullid:
497 if pl[1] == nullid:
494 return [self.changectx(pl[0])]
498 return [self.changectx(pl[0])]
495 return [self.changectx(pl[0]), self.changectx(pl[1])]
499 return [self.changectx(pl[0]), self.changectx(pl[1])]
496
500
497 def filectx(self, path, changeid=None, fileid=None):
501 def filectx(self, path, changeid=None, fileid=None):
498 """changeid can be a changeset revision, node, or tag.
502 """changeid can be a changeset revision, node, or tag.
499 fileid can be a file revision or node."""
503 fileid can be a file revision or node."""
500 return context.filectx(self, path, changeid, fileid)
504 return context.filectx(self, path, changeid, fileid)
501
505
502 def getcwd(self):
506 def getcwd(self):
503 return self.dirstate.getcwd()
507 return self.dirstate.getcwd()
504
508
505 def wfile(self, f, mode='r'):
509 def wfile(self, f, mode='r'):
506 return self.wopener(f, mode)
510 return self.wopener(f, mode)
507
511
508 def wread(self, filename):
512 def wread(self, filename):
509 if self.encodepats == None:
513 if self.encodepats == None:
510 l = []
514 l = []
511 for pat, cmd in self.ui.configitems("encode"):
515 for pat, cmd in self.ui.configitems("encode"):
512 mf = util.matcher(self.root, "", [pat], [], [])[1]
516 mf = util.matcher(self.root, "", [pat], [], [])[1]
513 l.append((mf, cmd))
517 l.append((mf, cmd))
514 self.encodepats = l
518 self.encodepats = l
515
519
516 data = self.wopener(filename, 'r').read()
520 data = self.wopener(filename, 'r').read()
517
521
518 for mf, cmd in self.encodepats:
522 for mf, cmd in self.encodepats:
519 if mf(filename):
523 if mf(filename):
520 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
521 data = util.filter(data, cmd)
525 data = util.filter(data, cmd)
522 break
526 break
523
527
524 return data
528 return data
525
529
526 def wwrite(self, filename, data, fd=None):
530 def wwrite(self, filename, data, fd=None):
527 if self.decodepats == None:
531 if self.decodepats == None:
528 l = []
532 l = []
529 for pat, cmd in self.ui.configitems("decode"):
533 for pat, cmd in self.ui.configitems("decode"):
530 mf = util.matcher(self.root, "", [pat], [], [])[1]
534 mf = util.matcher(self.root, "", [pat], [], [])[1]
531 l.append((mf, cmd))
535 l.append((mf, cmd))
532 self.decodepats = l
536 self.decodepats = l
533
537
534 for mf, cmd in self.decodepats:
538 for mf, cmd in self.decodepats:
535 if mf(filename):
539 if mf(filename):
536 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
540 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
537 data = util.filter(data, cmd)
541 data = util.filter(data, cmd)
538 break
542 break
539
543
540 if fd:
544 if fd:
541 return fd.write(data)
545 return fd.write(data)
542 return self.wopener(filename, 'w').write(data)
546 return self.wopener(filename, 'w').write(data)
543
547
544 def transaction(self):
548 def transaction(self):
545 tr = self.transhandle
549 tr = self.transhandle
546 if tr != None and tr.running():
550 if tr != None and tr.running():
547 return tr.nest()
551 return tr.nest()
548
552
549 # save dirstate for rollback
553 # save dirstate for rollback
550 try:
554 try:
551 ds = self.opener("dirstate").read()
555 ds = self.opener("dirstate").read()
552 except IOError:
556 except IOError:
553 ds = ""
557 ds = ""
554 self.opener("journal.dirstate", "w").write(ds)
558 self.opener("journal.dirstate", "w").write(ds)
555
559
556 renames = [(self.sjoin("journal"), self.sjoin("undo")),
560 renames = [(self.sjoin("journal"), self.sjoin("undo")),
557 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
561 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
558 tr = transaction.transaction(self.ui.warn, self.sopener,
562 tr = transaction.transaction(self.ui.warn, self.sopener,
559 self.sjoin("journal"),
563 self.sjoin("journal"),
560 aftertrans(renames))
564 aftertrans(renames))
561 self.transhandle = tr
565 self.transhandle = tr
562 return tr
566 return tr
563
567
564 def recover(self):
568 def recover(self):
565 l = self.lock()
569 l = self.lock()
566 if os.path.exists(self.sjoin("journal")):
570 if os.path.exists(self.sjoin("journal")):
567 self.ui.status(_("rolling back interrupted transaction\n"))
571 self.ui.status(_("rolling back interrupted transaction\n"))
568 transaction.rollback(self.sopener, self.sjoin("journal"))
572 transaction.rollback(self.sopener, self.sjoin("journal"))
569 self.reload()
573 self.reload()
570 return True
574 return True
571 else:
575 else:
572 self.ui.warn(_("no interrupted transaction available\n"))
576 self.ui.warn(_("no interrupted transaction available\n"))
573 return False
577 return False
574
578
575 def rollback(self, wlock=None):
579 def rollback(self, wlock=None):
576 if not wlock:
580 if not wlock:
577 wlock = self.wlock()
581 wlock = self.wlock()
578 l = self.lock()
582 l = self.lock()
579 if os.path.exists(self.sjoin("undo")):
583 if os.path.exists(self.sjoin("undo")):
580 self.ui.status(_("rolling back last transaction\n"))
584 self.ui.status(_("rolling back last transaction\n"))
581 transaction.rollback(self.sopener, self.sjoin("undo"))
585 transaction.rollback(self.sopener, self.sjoin("undo"))
582 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
586 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
583 self.reload()
587 self.reload()
584 self.wreload()
588 self.wreload()
585 else:
589 else:
586 self.ui.warn(_("no rollback information available\n"))
590 self.ui.warn(_("no rollback information available\n"))
587
591
    def wreload(self):
        # re-read the dirstate from disk, discarding in-memory state
        self.dirstate.read()

    def reload(self):
        # reload changelog and manifest from disk and invalidate the tag
        # caches (e.g. after a transaction rollback or lock acquisition)
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname and return the lock object.

        First attempts a non-blocking acquisition (timeout 0).  If the
        lock is held and wait is false, lock.LockHeld propagates to the
        caller; otherwise the user is warned and a second attempt blocks
        for up to ui.timeout seconds (default 600).  releasefn is passed
        to the lock for invocation on release; acquirefn is called here
        once the lock is held.  desc is a human-readable description
        used in messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        """Acquire the store lock; repository data is reloaded from disk
        on acquisition (acquirefn=self.reload)."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        back on release and re-read from disk on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: repo-relative file name
        manifest1, manifest2: manifests of the two commit parents
        linkrev: changelog revision the new filelog entry links to
        transaction: the active transaction
        changelist: list that fn is appended to when a new filelog
        revision is actually created

        Returns the filelog node for fn: the existing parent node when
        the file is unmodified, otherwise the newly added node.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files with explicit parents, bypassing
        dirstate status detection.  When p1 is None, the current
        dirstate parents are used instead."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node.

        files: explicit list of files to commit; when empty/None the
        result of self.status() (filtered by match) is used
        text: commit message; an editor is launched when it is empty or
        force_editor is true
        p1, p2: explicit parents; when p1 is None ("dirstate mode") the
        dirstate parents are used and the dirstate is updated afterwards
        extra: extra changelog metadata (copied, so the mutable default
        is safe)

        Returns the new changeset node, or None when there was nothing
        to commit or the user supplied an empty message.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        # build the lists of files to check in (commit) and to drop
        # (remove), either from the explicit file list or from status
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # a branch-name change alone is enough to allow a commit
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit mode: treat an unreadable file as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        When node is given, files come from that changeset's manifest;
        otherwise the dirstate walk is used (which yields the 'f' case).
        '''

        if node:
            # track which requested files were never seen in the manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        ignored and clean are only populated when list_ignored /
        list_clean are true.
        """

        # does the working-directory copy of fn differ from its entry
        # in manifest mf?
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        # manifest of node, restricted to files accepted by match
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # "compareworking": comparing the working dir against its parent
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # take the wlock opportunistically so clean-file state can be
            # written back to the dirstate; fall back to lockless operation
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # record the file as clean only when we hold
                            # the wlock, so concurrent readers are safe
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # "" in mf2 marks a working-dir pseudo-entry whose
                    # content must be compared by reading the file
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left over in mf1 was not in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

959 def add(self, list, wlock=None):
963 def add(self, list, wlock=None):
960 if not wlock:
964 if not wlock:
961 wlock = self.wlock()
965 wlock = self.wlock()
962 for f in list:
966 for f in list:
963 p = self.wjoin(f)
967 p = self.wjoin(f)
964 if not os.path.exists(p):
968 if not os.path.exists(p):
965 self.ui.warn(_("%s does not exist!\n") % f)
969 self.ui.warn(_("%s does not exist!\n") % f)
966 elif not os.path.isfile(p):
970 elif not os.path.isfile(p):
967 self.ui.warn(_("%s not added: only files supported currently\n")
971 self.ui.warn(_("%s not added: only files supported currently\n")
968 % f)
972 % f)
969 elif self.dirstate.state(f) in 'an':
973 elif self.dirstate.state(f) in 'an':
970 self.ui.warn(_("%s already tracked!\n") % f)
974 self.ui.warn(_("%s already tracked!\n") % f)
971 else:
975 else:
972 self.dirstate.update([f], "a")
976 self.dirstate.update([f], "a")
973
977
974 def forget(self, list, wlock=None):
978 def forget(self, list, wlock=None):
975 if not wlock:
979 if not wlock:
976 wlock = self.wlock()
980 wlock = self.wlock()
977 for f in list:
981 for f in list:
978 if self.dirstate.state(f) not in 'ai':
982 if self.dirstate.state(f) not in 'ai':
979 self.ui.warn(_("%s not added!\n") % f)
983 self.ui.warn(_("%s not added!\n") % f)
980 else:
984 else:
981 self.dirstate.forget([f])
985 self.dirstate.forget([f])
982
986
983 def remove(self, list, unlink=False, wlock=None):
987 def remove(self, list, unlink=False, wlock=None):
984 if unlink:
988 if unlink:
985 for f in list:
989 for f in list:
986 try:
990 try:
987 util.unlink(self.wjoin(f))
991 util.unlink(self.wjoin(f))
988 except OSError, inst:
992 except OSError, inst:
989 if inst.errno != errno.ENOENT:
993 if inst.errno != errno.ENOENT:
990 raise
994 raise
991 if not wlock:
995 if not wlock:
992 wlock = self.wlock()
996 wlock = self.wlock()
993 for f in list:
997 for f in list:
994 p = self.wjoin(f)
998 p = self.wjoin(f)
995 if os.path.exists(p):
999 if os.path.exists(p):
996 self.ui.warn(_("%s still exists!\n") % f)
1000 self.ui.warn(_("%s still exists!\n") % f)
997 elif self.dirstate.state(f) == 'a':
1001 elif self.dirstate.state(f) == 'a':
998 self.dirstate.forget([f])
1002 self.dirstate.forget([f])
999 elif f not in self.dirstate:
1003 elif f not in self.dirstate:
1000 self.ui.warn(_("%s not tracked!\n") % f)
1004 self.ui.warn(_("%s not tracked!\n") % f)
1001 else:
1005 else:
1002 self.dirstate.update([f], "r")
1006 self.dirstate.update([f], "r")
1003
1007
1004 def undelete(self, list, wlock=None):
1008 def undelete(self, list, wlock=None):
1005 p = self.dirstate.parents()[0]
1009 p = self.dirstate.parents()[0]
1006 mn = self.changelog.read(p)[0]
1010 mn = self.changelog.read(p)[0]
1007 m = self.manifest.read(mn)
1011 m = self.manifest.read(mn)
1008 if not wlock:
1012 if not wlock:
1009 wlock = self.wlock()
1013 wlock = self.wlock()
1010 for f in list:
1014 for f in list:
1011 if self.dirstate.state(f) not in "r":
1015 if self.dirstate.state(f) not in "r":
1012 self.ui.warn("%s not removed!\n" % f)
1016 self.ui.warn("%s not removed!\n" % f)
1013 else:
1017 else:
1014 t = self.file(f).read(m[f])
1018 t = self.file(f).read(m[f])
1015 self.wwrite(f, t)
1019 self.wwrite(f, t)
1016 util.set_exec(self.wjoin(f), m.execf(f))
1020 util.set_exec(self.wjoin(f), m.execf(f))
1017 self.dirstate.update([f], "n")
1021 self.dirstate.update([f], "n")
1018
1022
1019 def copy(self, source, dest, wlock=None):
1023 def copy(self, source, dest, wlock=None):
1020 p = self.wjoin(dest)
1024 p = self.wjoin(dest)
1021 if not os.path.exists(p):
1025 if not os.path.exists(p):
1022 self.ui.warn(_("%s does not exist!\n") % dest)
1026 self.ui.warn(_("%s does not exist!\n") % dest)
1023 elif not os.path.isfile(p):
1027 elif not os.path.isfile(p):
1024 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1028 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1025 else:
1029 else:
1026 if not wlock:
1030 if not wlock:
1027 wlock = self.wlock()
1031 wlock = self.wlock()
1028 if self.dirstate.state(dest) == '?':
1032 if self.dirstate.state(dest) == '?':
1029 self.dirstate.update([dest], "a")
1033 self.dirstate.update([dest], "a")
1030 self.dirstate.copy(source, dest)
1034 self.dirstate.copy(source, dest)
1031
1035
1032 def heads(self, start=None):
1036 def heads(self, start=None):
1033 heads = self.changelog.heads(start)
1037 heads = self.changelog.heads(start)
1034 # sort the output in rev descending order
1038 # sort the output in rev descending order
1035 heads = [(-self.changelog.rev(h), h) for h in heads]
1039 heads = [(-self.changelog.rev(h), h) for h in heads]
1036 heads.sort()
1040 heads.sort()
1037 return [n for (r, n) in heads]
1041 return [n for (r, n) in heads]
1038
1042
1039 # branchlookup returns a dict giving a list of branches for
1043 # branchlookup returns a dict giving a list of branches for
1040 # each head. A branch is defined as the tag of a node or
1044 # each head. A branch is defined as the tag of a node or
1041 # the branch of the node's parents. If a node has multiple
1045 # the branch of the node's parents. If a node has multiple
1042 # branch tags, tags are eliminated if they are visible from other
1046 # branch tags, tags are eliminated if they are visible from other
1043 # branch tags.
1047 # branch tags.
1044 #
1048 #
1045 # So, for this graph: a->b->c->d->e
1049 # So, for this graph: a->b->c->d->e
1046 # \ /
1050 # \ /
1047 # aa -----/
1051 # aa -----/
1048 # a has tag 2.6.12
1052 # a has tag 2.6.12
1049 # d has tag 2.6.13
1053 # d has tag 2.6.13
1050 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1054 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1051 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1055 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1052 # from the list.
1056 # from the list.
1053 #
1057 #
1054 # It is possible that more than one head will have the same branch tag.
1058 # It is possible that more than one head will have the same branch tag.
1055 # callers need to check the result for multiple heads under the same
1059 # callers need to check the result for multiple heads under the same
1056 # branch tag if that is a problem for them (ie checkout of a specific
1060 # branch tag if that is a problem for them (ie checkout of a specific
1057 # branch).
1061 # branch).
1058 #
1062 #
1059 # passing in a specific branch will limit the depth of the search
1063 # passing in a specific branch will limit the depth of the search
1060 # through the parents. It won't limit the branches returned in the
1064 # through the parents. It won't limit the branches returned in the
1061 # result though.
1065 # result though.
1062 def branchlookup(self, heads=None, branch=None):
1066 def branchlookup(self, heads=None, branch=None):
1063 if not heads:
1067 if not heads:
1064 heads = self.heads()
1068 heads = self.heads()
1065 headt = [ h for h in heads ]
1069 headt = [ h for h in heads ]
1066 chlog = self.changelog
1070 chlog = self.changelog
1067 branches = {}
1071 branches = {}
1068 merges = []
1072 merges = []
1069 seenmerge = {}
1073 seenmerge = {}
1070
1074
1071 # traverse the tree once for each head, recording in the branches
1075 # traverse the tree once for each head, recording in the branches
1072 # dict which tags are visible from this head. The branches
1076 # dict which tags are visible from this head. The branches
1073 # dict also records which tags are visible from each tag
1077 # dict also records which tags are visible from each tag
1074 # while we traverse.
1078 # while we traverse.
1075 while headt or merges:
1079 while headt or merges:
1076 if merges:
1080 if merges:
1077 n, found = merges.pop()
1081 n, found = merges.pop()
1078 visit = [n]
1082 visit = [n]
1079 else:
1083 else:
1080 h = headt.pop()
1084 h = headt.pop()
1081 visit = [h]
1085 visit = [h]
1082 found = [h]
1086 found = [h]
1083 seen = {}
1087 seen = {}
1084 while visit:
1088 while visit:
1085 n = visit.pop()
1089 n = visit.pop()
1086 if n in seen:
1090 if n in seen:
1087 continue
1091 continue
1088 pp = chlog.parents(n)
1092 pp = chlog.parents(n)
1089 tags = self.nodetags(n)
1093 tags = self.nodetags(n)
1090 if tags:
1094 if tags:
1091 for x in tags:
1095 for x in tags:
1092 if x == 'tip':
1096 if x == 'tip':
1093 continue
1097 continue
1094 for f in found:
1098 for f in found:
1095 branches.setdefault(f, {})[n] = 1
1099 branches.setdefault(f, {})[n] = 1
1096 branches.setdefault(n, {})[n] = 1
1100 branches.setdefault(n, {})[n] = 1
1097 break
1101 break
1098 if n not in found:
1102 if n not in found:
1099 found.append(n)
1103 found.append(n)
1100 if branch in tags:
1104 if branch in tags:
1101 continue
1105 continue
1102 seen[n] = 1
1106 seen[n] = 1
1103 if pp[1] != nullid and n not in seenmerge:
1107 if pp[1] != nullid and n not in seenmerge:
1104 merges.append((pp[1], [x for x in found]))
1108 merges.append((pp[1], [x for x in found]))
1105 seenmerge[n] = 1
1109 seenmerge[n] = 1
1106 if pp[0] != nullid:
1110 if pp[0] != nullid:
1107 visit.append(pp[0])
1111 visit.append(pp[0])
1108 # traverse the branches dict, eliminating branch tags from each
1112 # traverse the branches dict, eliminating branch tags from each
1109 # head that are visible from another branch tag for that head.
1113 # head that are visible from another branch tag for that head.
1110 out = {}
1114 out = {}
1111 viscache = {}
1115 viscache = {}
1112 for h in heads:
1116 for h in heads:
1113 def visible(node):
1117 def visible(node):
1114 if node in viscache:
1118 if node in viscache:
1115 return viscache[node]
1119 return viscache[node]
1116 ret = {}
1120 ret = {}
1117 visit = [node]
1121 visit = [node]
1118 while visit:
1122 while visit:
1119 x = visit.pop()
1123 x = visit.pop()
1120 if x in viscache:
1124 if x in viscache:
1121 ret.update(viscache[x])
1125 ret.update(viscache[x])
1122 elif x not in ret:
1126 elif x not in ret:
1123 ret[x] = 1
1127 ret[x] = 1
1124 if x in branches:
1128 if x in branches:
1125 visit[len(visit):] = branches[x].keys()
1129 visit[len(visit):] = branches[x].keys()
1126 viscache[node] = ret
1130 viscache[node] = ret
1127 return ret
1131 return ret
1128 if h not in branches:
1132 if h not in branches:
1129 continue
1133 continue
1130 # O(n^2), but somewhat limited. This only searches the
1134 # O(n^2), but somewhat limited. This only searches the
1131 # tags visible from a specific head, not all the tags in the
1135 # tags visible from a specific head, not all the tags in the
1132 # whole repo.
1136 # whole repo.
1133 for b in branches[h]:
1137 for b in branches[h]:
1134 vis = False
1138 vis = False
1135 for bb in branches[h].keys():
1139 for bb in branches[h].keys():
1136 if b != bb:
1140 if b != bb:
1137 if b in visible(bb):
1141 if b in visible(bb):
1138 vis = True
1142 vis = True
1139 break
1143 break
1140 if not vis:
1144 if not vis:
1141 l = out.setdefault(h, [])
1145 l = out.setdefault(h, [])
1142 l[len(l):] = self.nodetags(b)
1146 l[len(l):] = self.nodetags(b)
1143 return out
1147 return out
1144
1148
1145 def branches(self, nodes):
1149 def branches(self, nodes):
1146 if not nodes:
1150 if not nodes:
1147 nodes = [self.changelog.tip()]
1151 nodes = [self.changelog.tip()]
1148 b = []
1152 b = []
1149 for n in nodes:
1153 for n in nodes:
1150 t = n
1154 t = n
1151 while 1:
1155 while 1:
1152 p = self.changelog.parents(n)
1156 p = self.changelog.parents(n)
1153 if p[1] != nullid or p[0] == nullid:
1157 if p[1] != nullid or p[0] == nullid:
1154 b.append((t, n, p[0], p[1]))
1158 b.append((t, n, p[0], p[1]))
1155 break
1159 break
1156 n = p[0]
1160 n = p[0]
1157 return b
1161 return b
1158
1162
1159 def between(self, pairs):
1163 def between(self, pairs):
1160 r = []
1164 r = []
1161
1165
1162 for top, bottom in pairs:
1166 for top, bottom in pairs:
1163 n, l, i = top, [], 0
1167 n, l, i = top, [], 0
1164 f = 1
1168 f = 1
1165
1169
1166 while n != bottom:
1170 while n != bottom:
1167 p = self.changelog.parents(n)[0]
1171 p = self.changelog.parents(n)[0]
1168 if i == f:
1172 if i == f:
1169 l.append(n)
1173 l.append(n)
1170 f = f * 2
1174 f = f * 2
1171 n = p
1175 n = p
1172 i += 1
1176 i += 1
1173
1177
1174 r.append(l)
1178 r.append(l)
1175
1179
1176 return r
1180 return r
1177
1181
1178 def findincoming(self, remote, base=None, heads=None, force=False):
1182 def findincoming(self, remote, base=None, heads=None, force=False):
1179 """Return list of roots of the subsets of missing nodes from remote
1183 """Return list of roots of the subsets of missing nodes from remote
1180
1184
1181 If base dict is specified, assume that these nodes and their parents
1185 If base dict is specified, assume that these nodes and their parents
1182 exist on the remote side and that no child of a node of base exists
1186 exist on the remote side and that no child of a node of base exists
1183 in both remote and self.
1187 in both remote and self.
1184 Furthermore base will be updated to include the nodes that exists
1188 Furthermore base will be updated to include the nodes that exists
1185 in self and remote but no children exists in self and remote.
1189 in self and remote but no children exists in self and remote.
1186 If a list of heads is specified, return only nodes which are heads
1190 If a list of heads is specified, return only nodes which are heads
1187 or ancestors of these heads.
1191 or ancestors of these heads.
1188
1192
1189 All the ancestors of base are in self and in remote.
1193 All the ancestors of base are in self and in remote.
1190 All the descendants of the list returned are missing in self.
1194 All the descendants of the list returned are missing in self.
1191 (and so we know that the rest of the nodes are missing in remote, see
1195 (and so we know that the rest of the nodes are missing in remote, see
1192 outgoing)
1196 outgoing)
1193 """
1197 """
1194 m = self.changelog.nodemap
1198 m = self.changelog.nodemap
1195 search = []
1199 search = []
1196 fetch = {}
1200 fetch = {}
1197 seen = {}
1201 seen = {}
1198 seenbranch = {}
1202 seenbranch = {}
1199 if base == None:
1203 if base == None:
1200 base = {}
1204 base = {}
1201
1205
1202 if not heads:
1206 if not heads:
1203 heads = remote.heads()
1207 heads = remote.heads()
1204
1208
1205 if self.changelog.tip() == nullid:
1209 if self.changelog.tip() == nullid:
1206 base[nullid] = 1
1210 base[nullid] = 1
1207 if heads != [nullid]:
1211 if heads != [nullid]:
1208 return [nullid]
1212 return [nullid]
1209 return []
1213 return []
1210
1214
1211 # assume we're closer to the tip than the root
1215 # assume we're closer to the tip than the root
1212 # and start by examining the heads
1216 # and start by examining the heads
1213 self.ui.status(_("searching for changes\n"))
1217 self.ui.status(_("searching for changes\n"))
1214
1218
1215 unknown = []
1219 unknown = []
1216 for h in heads:
1220 for h in heads:
1217 if h not in m:
1221 if h not in m:
1218 unknown.append(h)
1222 unknown.append(h)
1219 else:
1223 else:
1220 base[h] = 1
1224 base[h] = 1
1221
1225
1222 if not unknown:
1226 if not unknown:
1223 return []
1227 return []
1224
1228
1225 req = dict.fromkeys(unknown)
1229 req = dict.fromkeys(unknown)
1226 reqcnt = 0
1230 reqcnt = 0
1227
1231
1228 # search through remote branches
1232 # search through remote branches
1229 # a 'branch' here is a linear segment of history, with four parts:
1233 # a 'branch' here is a linear segment of history, with four parts:
1230 # head, root, first parent, second parent
1234 # head, root, first parent, second parent
1231 # (a branch always has two parents (or none) by definition)
1235 # (a branch always has two parents (or none) by definition)
1232 unknown = remote.branches(unknown)
1236 unknown = remote.branches(unknown)
1233 while unknown:
1237 while unknown:
1234 r = []
1238 r = []
1235 while unknown:
1239 while unknown:
1236 n = unknown.pop(0)
1240 n = unknown.pop(0)
1237 if n[0] in seen:
1241 if n[0] in seen:
1238 continue
1242 continue
1239
1243
1240 self.ui.debug(_("examining %s:%s\n")
1244 self.ui.debug(_("examining %s:%s\n")
1241 % (short(n[0]), short(n[1])))
1245 % (short(n[0]), short(n[1])))
1242 if n[0] == nullid: # found the end of the branch
1246 if n[0] == nullid: # found the end of the branch
1243 pass
1247 pass
1244 elif n in seenbranch:
1248 elif n in seenbranch:
1245 self.ui.debug(_("branch already found\n"))
1249 self.ui.debug(_("branch already found\n"))
1246 continue
1250 continue
1247 elif n[1] and n[1] in m: # do we know the base?
1251 elif n[1] and n[1] in m: # do we know the base?
1248 self.ui.debug(_("found incomplete branch %s:%s\n")
1252 self.ui.debug(_("found incomplete branch %s:%s\n")
1249 % (short(n[0]), short(n[1])))
1253 % (short(n[0]), short(n[1])))
1250 search.append(n) # schedule branch range for scanning
1254 search.append(n) # schedule branch range for scanning
1251 seenbranch[n] = 1
1255 seenbranch[n] = 1
1252 else:
1256 else:
1253 if n[1] not in seen and n[1] not in fetch:
1257 if n[1] not in seen and n[1] not in fetch:
1254 if n[2] in m and n[3] in m:
1258 if n[2] in m and n[3] in m:
1255 self.ui.debug(_("found new changeset %s\n") %
1259 self.ui.debug(_("found new changeset %s\n") %
1256 short(n[1]))
1260 short(n[1]))
1257 fetch[n[1]] = 1 # earliest unknown
1261 fetch[n[1]] = 1 # earliest unknown
1258 for p in n[2:4]:
1262 for p in n[2:4]:
1259 if p in m:
1263 if p in m:
1260 base[p] = 1 # latest known
1264 base[p] = 1 # latest known
1261
1265
1262 for p in n[2:4]:
1266 for p in n[2:4]:
1263 if p not in req and p not in m:
1267 if p not in req and p not in m:
1264 r.append(p)
1268 r.append(p)
1265 req[p] = 1
1269 req[p] = 1
1266 seen[n[0]] = 1
1270 seen[n[0]] = 1
1267
1271
1268 if r:
1272 if r:
1269 reqcnt += 1
1273 reqcnt += 1
1270 self.ui.debug(_("request %d: %s\n") %
1274 self.ui.debug(_("request %d: %s\n") %
1271 (reqcnt, " ".join(map(short, r))))
1275 (reqcnt, " ".join(map(short, r))))
1272 for p in xrange(0, len(r), 10):
1276 for p in xrange(0, len(r), 10):
1273 for b in remote.branches(r[p:p+10]):
1277 for b in remote.branches(r[p:p+10]):
1274 self.ui.debug(_("received %s:%s\n") %
1278 self.ui.debug(_("received %s:%s\n") %
1275 (short(b[0]), short(b[1])))
1279 (short(b[0]), short(b[1])))
1276 unknown.append(b)
1280 unknown.append(b)
1277
1281
1278 # do binary search on the branches we found
1282 # do binary search on the branches we found
1279 while search:
1283 while search:
1280 n = search.pop(0)
1284 n = search.pop(0)
1281 reqcnt += 1
1285 reqcnt += 1
1282 l = remote.between([(n[0], n[1])])[0]
1286 l = remote.between([(n[0], n[1])])[0]
1283 l.append(n[1])
1287 l.append(n[1])
1284 p = n[0]
1288 p = n[0]
1285 f = 1
1289 f = 1
1286 for i in l:
1290 for i in l:
1287 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1291 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1288 if i in m:
1292 if i in m:
1289 if f <= 2:
1293 if f <= 2:
1290 self.ui.debug(_("found new branch changeset %s\n") %
1294 self.ui.debug(_("found new branch changeset %s\n") %
1291 short(p))
1295 short(p))
1292 fetch[p] = 1
1296 fetch[p] = 1
1293 base[i] = 1
1297 base[i] = 1
1294 else:
1298 else:
1295 self.ui.debug(_("narrowed branch search to %s:%s\n")
1299 self.ui.debug(_("narrowed branch search to %s:%s\n")
1296 % (short(p), short(i)))
1300 % (short(p), short(i)))
1297 search.append((p, i))
1301 search.append((p, i))
1298 break
1302 break
1299 p, f = i, f * 2
1303 p, f = i, f * 2
1300
1304
1301 # sanity check our fetch list
1305 # sanity check our fetch list
1302 for f in fetch.keys():
1306 for f in fetch.keys():
1303 if f in m:
1307 if f in m:
1304 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1308 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1305
1309
1306 if base.keys() == [nullid]:
1310 if base.keys() == [nullid]:
1307 if force:
1311 if force:
1308 self.ui.warn(_("warning: repository is unrelated\n"))
1312 self.ui.warn(_("warning: repository is unrelated\n"))
1309 else:
1313 else:
1310 raise util.Abort(_("repository is unrelated"))
1314 raise util.Abort(_("repository is unrelated"))
1311
1315
1312 self.ui.debug(_("found new changesets starting at ") +
1316 self.ui.debug(_("found new changesets starting at ") +
1313 " ".join([short(f) for f in fetch]) + "\n")
1317 " ".join([short(f) for f in fetch]) + "\n")
1314
1318
1315 self.ui.debug(_("%d total queries\n") % reqcnt)
1319 self.ui.debug(_("%d total queries\n") % reqcnt)
1316
1320
1317 return fetch.keys()
1321 return fetch.keys()
1318
1322
1319 def findoutgoing(self, remote, base=None, heads=None, force=False):
1323 def findoutgoing(self, remote, base=None, heads=None, force=False):
1320 """Return list of nodes that are roots of subsets not in remote
1324 """Return list of nodes that are roots of subsets not in remote
1321
1325
1322 If base dict is specified, assume that these nodes and their parents
1326 If base dict is specified, assume that these nodes and their parents
1323 exist on the remote side.
1327 exist on the remote side.
1324 If a list of heads is specified, return only nodes which are heads
1328 If a list of heads is specified, return only nodes which are heads
1325 or ancestors of these heads, and return a second element which
1329 or ancestors of these heads, and return a second element which
1326 contains all remote heads which get new children.
1330 contains all remote heads which get new children.
1327 """
1331 """
1328 if base == None:
1332 if base == None:
1329 base = {}
1333 base = {}
1330 self.findincoming(remote, base, heads, force=force)
1334 self.findincoming(remote, base, heads, force=force)
1331
1335
1332 self.ui.debug(_("common changesets up to ")
1336 self.ui.debug(_("common changesets up to ")
1333 + " ".join(map(short, base.keys())) + "\n")
1337 + " ".join(map(short, base.keys())) + "\n")
1334
1338
1335 remain = dict.fromkeys(self.changelog.nodemap)
1339 remain = dict.fromkeys(self.changelog.nodemap)
1336
1340
1337 # prune everything remote has from the tree
1341 # prune everything remote has from the tree
1338 del remain[nullid]
1342 del remain[nullid]
1339 remove = base.keys()
1343 remove = base.keys()
1340 while remove:
1344 while remove:
1341 n = remove.pop(0)
1345 n = remove.pop(0)
1342 if n in remain:
1346 if n in remain:
1343 del remain[n]
1347 del remain[n]
1344 for p in self.changelog.parents(n):
1348 for p in self.changelog.parents(n):
1345 remove.append(p)
1349 remove.append(p)
1346
1350
1347 # find every node whose parents have been pruned
1351 # find every node whose parents have been pruned
1348 subset = []
1352 subset = []
1349 # find every remote head that will get new children
1353 # find every remote head that will get new children
1350 updated_heads = {}
1354 updated_heads = {}
1351 for n in remain:
1355 for n in remain:
1352 p1, p2 = self.changelog.parents(n)
1356 p1, p2 = self.changelog.parents(n)
1353 if p1 not in remain and p2 not in remain:
1357 if p1 not in remain and p2 not in remain:
1354 subset.append(n)
1358 subset.append(n)
1355 if heads:
1359 if heads:
1356 if p1 in heads:
1360 if p1 in heads:
1357 updated_heads[p1] = True
1361 updated_heads[p1] = True
1358 if p2 in heads:
1362 if p2 in heads:
1359 updated_heads[p2] = True
1363 updated_heads[p2] = True
1360
1364
1361 # this is the set of all roots we have to push
1365 # this is the set of all roots we have to push
1362 if heads:
1366 if heads:
1363 return subset, updated_heads.keys()
1367 return subset, updated_heads.keys()
1364 else:
1368 else:
1365 return subset
1369 return subset
1366
1370
1367 def pull(self, remote, heads=None, force=False, lock=None):
1371 def pull(self, remote, heads=None, force=False, lock=None):
1368 mylock = False
1372 mylock = False
1369 if not lock:
1373 if not lock:
1370 lock = self.lock()
1374 lock = self.lock()
1371 mylock = True
1375 mylock = True
1372
1376
1373 try:
1377 try:
1374 fetch = self.findincoming(remote, force=force)
1378 fetch = self.findincoming(remote, force=force)
1375 if fetch == [nullid]:
1379 if fetch == [nullid]:
1376 self.ui.status(_("requesting all changes\n"))
1380 self.ui.status(_("requesting all changes\n"))
1377
1381
1378 if not fetch:
1382 if not fetch:
1379 self.ui.status(_("no changes found\n"))
1383 self.ui.status(_("no changes found\n"))
1380 return 0
1384 return 0
1381
1385
1382 if heads is None:
1386 if heads is None:
1383 cg = remote.changegroup(fetch, 'pull')
1387 cg = remote.changegroup(fetch, 'pull')
1384 else:
1388 else:
1385 if 'changegroupsubset' not in remote.capabilities:
1389 if 'changegroupsubset' not in remote.capabilities:
1386 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1390 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1387 cg = remote.changegroupsubset(fetch, heads, 'pull')
1391 cg = remote.changegroupsubset(fetch, heads, 'pull')
1388 return self.addchangegroup(cg, 'pull', remote.url())
1392 return self.addchangegroup(cg, 'pull', remote.url())
1389 finally:
1393 finally:
1390 if mylock:
1394 if mylock:
1391 lock.release()
1395 lock.release()
1392
1396
1393 def push(self, remote, force=False, revs=None):
1397 def push(self, remote, force=False, revs=None):
1394 # there are two ways to push to remote repo:
1398 # there are two ways to push to remote repo:
1395 #
1399 #
1396 # addchangegroup assumes local user can lock remote
1400 # addchangegroup assumes local user can lock remote
1397 # repo (local filesystem, old ssh servers).
1401 # repo (local filesystem, old ssh servers).
1398 #
1402 #
1399 # unbundle assumes local user cannot lock remote repo (new ssh
1403 # unbundle assumes local user cannot lock remote repo (new ssh
1400 # servers, http servers).
1404 # servers, http servers).
1401
1405
1402 if remote.capable('unbundle'):
1406 if remote.capable('unbundle'):
1403 return self.push_unbundle(remote, force, revs)
1407 return self.push_unbundle(remote, force, revs)
1404 return self.push_addchangegroup(remote, force, revs)
1408 return self.push_addchangegroup(remote, force, revs)
1405
1409
1406 def prepush(self, remote, force, revs):
1410 def prepush(self, remote, force, revs):
1407 base = {}
1411 base = {}
1408 remote_heads = remote.heads()
1412 remote_heads = remote.heads()
1409 inc = self.findincoming(remote, base, remote_heads, force=force)
1413 inc = self.findincoming(remote, base, remote_heads, force=force)
1410
1414
1411 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1415 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1412 if revs is not None:
1416 if revs is not None:
1413 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1417 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1414 else:
1418 else:
1415 bases, heads = update, self.changelog.heads()
1419 bases, heads = update, self.changelog.heads()
1416
1420
1417 if not bases:
1421 if not bases:
1418 self.ui.status(_("no changes found\n"))
1422 self.ui.status(_("no changes found\n"))
1419 return None, 1
1423 return None, 1
1420 elif not force:
1424 elif not force:
1421 # check if we're creating new remote heads
1425 # check if we're creating new remote heads
1422 # to be a remote head after push, node must be either
1426 # to be a remote head after push, node must be either
1423 # - unknown locally
1427 # - unknown locally
1424 # - a local outgoing head descended from update
1428 # - a local outgoing head descended from update
1425 # - a remote head that's known locally and not
1429 # - a remote head that's known locally and not
1426 # ancestral to an outgoing head
1430 # ancestral to an outgoing head
1427
1431
1428 warn = 0
1432 warn = 0
1429
1433
1430 if remote_heads == [nullid]:
1434 if remote_heads == [nullid]:
1431 warn = 0
1435 warn = 0
1432 elif not revs and len(heads) > len(remote_heads):
1436 elif not revs and len(heads) > len(remote_heads):
1433 warn = 1
1437 warn = 1
1434 else:
1438 else:
1435 newheads = list(heads)
1439 newheads = list(heads)
1436 for r in remote_heads:
1440 for r in remote_heads:
1437 if r in self.changelog.nodemap:
1441 if r in self.changelog.nodemap:
1438 desc = self.changelog.heads(r, heads)
1442 desc = self.changelog.heads(r, heads)
1439 l = [h for h in heads if h in desc]
1443 l = [h for h in heads if h in desc]
1440 if not l:
1444 if not l:
1441 newheads.append(r)
1445 newheads.append(r)
1442 else:
1446 else:
1443 newheads.append(r)
1447 newheads.append(r)
1444 if len(newheads) > len(remote_heads):
1448 if len(newheads) > len(remote_heads):
1445 warn = 1
1449 warn = 1
1446
1450
1447 if warn:
1451 if warn:
1448 self.ui.warn(_("abort: push creates new remote branches!\n"))
1452 self.ui.warn(_("abort: push creates new remote branches!\n"))
1449 self.ui.status(_("(did you forget to merge?"
1453 self.ui.status(_("(did you forget to merge?"
1450 " use push -f to force)\n"))
1454 " use push -f to force)\n"))
1451 return None, 1
1455 return None, 1
1452 elif inc:
1456 elif inc:
1453 self.ui.warn(_("note: unsynced remote changes!\n"))
1457 self.ui.warn(_("note: unsynced remote changes!\n"))
1454
1458
1455
1459
1456 if revs is None:
1460 if revs is None:
1457 cg = self.changegroup(update, 'push')
1461 cg = self.changegroup(update, 'push')
1458 else:
1462 else:
1459 cg = self.changegroupsubset(update, revs, 'push')
1463 cg = self.changegroupsubset(update, revs, 'push')
1460 return cg, remote_heads
1464 return cg, remote_heads
1461
1465
1462 def push_addchangegroup(self, remote, force, revs):
1466 def push_addchangegroup(self, remote, force, revs):
1463 lock = remote.lock()
1467 lock = remote.lock()
1464
1468
1465 ret = self.prepush(remote, force, revs)
1469 ret = self.prepush(remote, force, revs)
1466 if ret[0] is not None:
1470 if ret[0] is not None:
1467 cg, remote_heads = ret
1471 cg, remote_heads = ret
1468 return remote.addchangegroup(cg, 'push', self.url())
1472 return remote.addchangegroup(cg, 'push', self.url())
1469 return ret[1]
1473 return ret[1]
1470
1474
1471 def push_unbundle(self, remote, force, revs):
1475 def push_unbundle(self, remote, force, revs):
1472 # local repo finds heads on server, finds out what revs it
1476 # local repo finds heads on server, finds out what revs it
1473 # must push. once revs transferred, if server finds it has
1477 # must push. once revs transferred, if server finds it has
1474 # different heads (someone else won commit/push race), server
1478 # different heads (someone else won commit/push race), server
1475 # aborts.
1479 # aborts.
1476
1480
1477 ret = self.prepush(remote, force, revs)
1481 ret = self.prepush(remote, force, revs)
1478 if ret[0] is not None:
1482 if ret[0] is not None:
1479 cg, remote_heads = ret
1483 cg, remote_heads = ret
1480 if force: remote_heads = ['force']
1484 if force: remote_heads = ['force']
1481 return remote.unbundle(cg, remote_heads, 'push')
1485 return remote.unbundle(cg, remote_heads, 'push')
1482 return ret[1]
1486 return ret[1]
1483
1487
1484 def changegroupinfo(self, nodes):
1488 def changegroupinfo(self, nodes):
1485 self.ui.note(_("%d changesets found\n") % len(nodes))
1489 self.ui.note(_("%d changesets found\n") % len(nodes))
1486 if self.ui.debugflag:
1490 if self.ui.debugflag:
1487 self.ui.debug(_("List of changesets:\n"))
1491 self.ui.debug(_("List of changesets:\n"))
1488 for node in nodes:
1492 for node in nodes:
1489 self.ui.debug("%s\n" % hex(node))
1493 self.ui.debug("%s\n" % hex(node))
1490
1494
1491 def changegroupsubset(self, bases, heads, source):
1495 def changegroupsubset(self, bases, heads, source):
1492 """This function generates a changegroup consisting of all the nodes
1496 """This function generates a changegroup consisting of all the nodes
1493 that are descendents of any of the bases, and ancestors of any of
1497 that are descendents of any of the bases, and ancestors of any of
1494 the heads.
1498 the heads.
1495
1499
1496 It is fairly complex as determining which filenodes and which
1500 It is fairly complex as determining which filenodes and which
1497 manifest nodes need to be included for the changeset to be complete
1501 manifest nodes need to be included for the changeset to be complete
1498 is non-trivial.
1502 is non-trivial.
1499
1503
1500 Another wrinkle is doing the reverse, figuring out which changeset in
1504 Another wrinkle is doing the reverse, figuring out which changeset in
1501 the changegroup a particular filenode or manifestnode belongs to."""
1505 the changegroup a particular filenode or manifestnode belongs to."""
1502
1506
1503 self.hook('preoutgoing', throw=True, source=source)
1507 self.hook('preoutgoing', throw=True, source=source)
1504
1508
1505 # Set up some initial variables
1509 # Set up some initial variables
1506 # Make it easy to refer to self.changelog
1510 # Make it easy to refer to self.changelog
1507 cl = self.changelog
1511 cl = self.changelog
1508 # msng is short for missing - compute the list of changesets in this
1512 # msng is short for missing - compute the list of changesets in this
1509 # changegroup.
1513 # changegroup.
1510 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1514 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1511 self.changegroupinfo(msng_cl_lst)
1515 self.changegroupinfo(msng_cl_lst)
1512 # Some bases may turn out to be superfluous, and some heads may be
1516 # Some bases may turn out to be superfluous, and some heads may be
1513 # too. nodesbetween will return the minimal set of bases and heads
1517 # too. nodesbetween will return the minimal set of bases and heads
1514 # necessary to re-create the changegroup.
1518 # necessary to re-create the changegroup.
1515
1519
1516 # Known heads are the list of heads that it is assumed the recipient
1520 # Known heads are the list of heads that it is assumed the recipient
1517 # of this changegroup will know about.
1521 # of this changegroup will know about.
1518 knownheads = {}
1522 knownheads = {}
1519 # We assume that all parents of bases are known heads.
1523 # We assume that all parents of bases are known heads.
1520 for n in bases:
1524 for n in bases:
1521 for p in cl.parents(n):
1525 for p in cl.parents(n):
1522 if p != nullid:
1526 if p != nullid:
1523 knownheads[p] = 1
1527 knownheads[p] = 1
1524 knownheads = knownheads.keys()
1528 knownheads = knownheads.keys()
1525 if knownheads:
1529 if knownheads:
1526 # Now that we know what heads are known, we can compute which
1530 # Now that we know what heads are known, we can compute which
1527 # changesets are known. The recipient must know about all
1531 # changesets are known. The recipient must know about all
1528 # changesets required to reach the known heads from the null
1532 # changesets required to reach the known heads from the null
1529 # changeset.
1533 # changeset.
1530 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1534 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1531 junk = None
1535 junk = None
1532 # Transform the list into an ersatz set.
1536 # Transform the list into an ersatz set.
1533 has_cl_set = dict.fromkeys(has_cl_set)
1537 has_cl_set = dict.fromkeys(has_cl_set)
1534 else:
1538 else:
1535 # If there were no known heads, the recipient cannot be assumed to
1539 # If there were no known heads, the recipient cannot be assumed to
1536 # know about any changesets.
1540 # know about any changesets.
1537 has_cl_set = {}
1541 has_cl_set = {}
1538
1542
1539 # Make it easy to refer to self.manifest
1543 # Make it easy to refer to self.manifest
1540 mnfst = self.manifest
1544 mnfst = self.manifest
1541 # We don't know which manifests are missing yet
1545 # We don't know which manifests are missing yet
1542 msng_mnfst_set = {}
1546 msng_mnfst_set = {}
1543 # Nor do we know which filenodes are missing.
1547 # Nor do we know which filenodes are missing.
1544 msng_filenode_set = {}
1548 msng_filenode_set = {}
1545
1549
1546 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1550 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1547 junk = None
1551 junk = None
1548
1552
1549 # A changeset always belongs to itself, so the changenode lookup
1553 # A changeset always belongs to itself, so the changenode lookup
1550 # function for a changenode is identity.
1554 # function for a changenode is identity.
1551 def identity(x):
1555 def identity(x):
1552 return x
1556 return x
1553
1557
1554 # A function generating function. Sets up an environment for the
1558 # A function generating function. Sets up an environment for the
1555 # inner function.
1559 # inner function.
1556 def cmp_by_rev_func(revlog):
1560 def cmp_by_rev_func(revlog):
1557 # Compare two nodes by their revision number in the environment's
1561 # Compare two nodes by their revision number in the environment's
1558 # revision history. Since the revision number both represents the
1562 # revision history. Since the revision number both represents the
1559 # most efficient order to read the nodes in, and represents a
1563 # most efficient order to read the nodes in, and represents a
1560 # topological sorting of the nodes, this function is often useful.
1564 # topological sorting of the nodes, this function is often useful.
1561 def cmp_by_rev(a, b):
1565 def cmp_by_rev(a, b):
1562 return cmp(revlog.rev(a), revlog.rev(b))
1566 return cmp(revlog.rev(a), revlog.rev(b))
1563 return cmp_by_rev
1567 return cmp_by_rev
1564
1568
1565 # If we determine that a particular file or manifest node must be a
1569 # If we determine that a particular file or manifest node must be a
1566 # node that the recipient of the changegroup will already have, we can
1570 # node that the recipient of the changegroup will already have, we can
1567 # also assume the recipient will have all the parents. This function
1571 # also assume the recipient will have all the parents. This function
1568 # prunes them from the set of missing nodes.
1572 # prunes them from the set of missing nodes.
1569 def prune_parents(revlog, hasset, msngset):
1573 def prune_parents(revlog, hasset, msngset):
1570 haslst = hasset.keys()
1574 haslst = hasset.keys()
1571 haslst.sort(cmp_by_rev_func(revlog))
1575 haslst.sort(cmp_by_rev_func(revlog))
1572 for node in haslst:
1576 for node in haslst:
1573 parentlst = [p for p in revlog.parents(node) if p != nullid]
1577 parentlst = [p for p in revlog.parents(node) if p != nullid]
1574 while parentlst:
1578 while parentlst:
1575 n = parentlst.pop()
1579 n = parentlst.pop()
1576 if n not in hasset:
1580 if n not in hasset:
1577 hasset[n] = 1
1581 hasset[n] = 1
1578 p = [p for p in revlog.parents(n) if p != nullid]
1582 p = [p for p in revlog.parents(n) if p != nullid]
1579 parentlst.extend(p)
1583 parentlst.extend(p)
1580 for n in hasset:
1584 for n in hasset:
1581 msngset.pop(n, None)
1585 msngset.pop(n, None)
1582
1586
1583 # This is a function generating function used to set up an environment
1587 # This is a function generating function used to set up an environment
1584 # for the inner function to execute in.
1588 # for the inner function to execute in.
1585 def manifest_and_file_collector(changedfileset):
1589 def manifest_and_file_collector(changedfileset):
1586 # This is an information gathering function that gathers
1590 # This is an information gathering function that gathers
1587 # information from each changeset node that goes out as part of
1591 # information from each changeset node that goes out as part of
1588 # the changegroup. The information gathered is a list of which
1592 # the changegroup. The information gathered is a list of which
1589 # manifest nodes are potentially required (the recipient may
1593 # manifest nodes are potentially required (the recipient may
1590 # already have them) and total list of all files which were
1594 # already have them) and total list of all files which were
1591 # changed in any changeset in the changegroup.
1595 # changed in any changeset in the changegroup.
1592 #
1596 #
1593 # We also remember the first changenode we saw any manifest
1597 # We also remember the first changenode we saw any manifest
1594 # referenced by so we can later determine which changenode 'owns'
1598 # referenced by so we can later determine which changenode 'owns'
1595 # the manifest.
1599 # the manifest.
1596 def collect_manifests_and_files(clnode):
1600 def collect_manifests_and_files(clnode):
1597 c = cl.read(clnode)
1601 c = cl.read(clnode)
1598 for f in c[3]:
1602 for f in c[3]:
1599 # This is to make sure we only have one instance of each
1603 # This is to make sure we only have one instance of each
1600 # filename string for each filename.
1604 # filename string for each filename.
1601 changedfileset.setdefault(f, f)
1605 changedfileset.setdefault(f, f)
1602 msng_mnfst_set.setdefault(c[0], clnode)
1606 msng_mnfst_set.setdefault(c[0], clnode)
1603 return collect_manifests_and_files
1607 return collect_manifests_and_files
1604
1608
1605 # Figure out which manifest nodes (of the ones we think might be part
1609 # Figure out which manifest nodes (of the ones we think might be part
1606 # of the changegroup) the recipient must know about and remove them
1610 # of the changegroup) the recipient must know about and remove them
1607 # from the changegroup.
1611 # from the changegroup.
1608 def prune_manifests():
1612 def prune_manifests():
1609 has_mnfst_set = {}
1613 has_mnfst_set = {}
1610 for n in msng_mnfst_set:
1614 for n in msng_mnfst_set:
1611 # If a 'missing' manifest thinks it belongs to a changenode
1615 # If a 'missing' manifest thinks it belongs to a changenode
1612 # the recipient is assumed to have, obviously the recipient
1616 # the recipient is assumed to have, obviously the recipient
1613 # must have that manifest.
1617 # must have that manifest.
1614 linknode = cl.node(mnfst.linkrev(n))
1618 linknode = cl.node(mnfst.linkrev(n))
1615 if linknode in has_cl_set:
1619 if linknode in has_cl_set:
1616 has_mnfst_set[n] = 1
1620 has_mnfst_set[n] = 1
1617 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1621 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1618
1622
1619 # Use the information collected in collect_manifests_and_files to say
1623 # Use the information collected in collect_manifests_and_files to say
1620 # which changenode any manifestnode belongs to.
1624 # which changenode any manifestnode belongs to.
1621 def lookup_manifest_link(mnfstnode):
1625 def lookup_manifest_link(mnfstnode):
1622 return msng_mnfst_set[mnfstnode]
1626 return msng_mnfst_set[mnfstnode]
1623
1627
1624 # A function generating function that sets up the initial environment
1628 # A function generating function that sets up the initial environment
1625 # the inner function.
1629 # the inner function.
1626 def filenode_collector(changedfiles):
1630 def filenode_collector(changedfiles):
1627 next_rev = [0]
1631 next_rev = [0]
1628 # This gathers information from each manifestnode included in the
1632 # This gathers information from each manifestnode included in the
1629 # changegroup about which filenodes the manifest node references
1633 # changegroup about which filenodes the manifest node references
1630 # so we can include those in the changegroup too.
1634 # so we can include those in the changegroup too.
1631 #
1635 #
1632 # It also remembers which changenode each filenode belongs to. It
1636 # It also remembers which changenode each filenode belongs to. It
1633 # does this by assuming the a filenode belongs to the changenode
1637 # does this by assuming the a filenode belongs to the changenode
1634 # the first manifest that references it belongs to.
1638 # the first manifest that references it belongs to.
1635 def collect_msng_filenodes(mnfstnode):
1639 def collect_msng_filenodes(mnfstnode):
1636 r = mnfst.rev(mnfstnode)
1640 r = mnfst.rev(mnfstnode)
1637 if r == next_rev[0]:
1641 if r == next_rev[0]:
1638 # If the last rev we looked at was the one just previous,
1642 # If the last rev we looked at was the one just previous,
1639 # we only need to see a diff.
1643 # we only need to see a diff.
1640 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1644 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1641 # For each line in the delta
1645 # For each line in the delta
1642 for dline in delta.splitlines():
1646 for dline in delta.splitlines():
1643 # get the filename and filenode for that line
1647 # get the filename and filenode for that line
1644 f, fnode = dline.split('\0')
1648 f, fnode = dline.split('\0')
1645 fnode = bin(fnode[:40])
1649 fnode = bin(fnode[:40])
1646 f = changedfiles.get(f, None)
1650 f = changedfiles.get(f, None)
1647 # And if the file is in the list of files we care
1651 # And if the file is in the list of files we care
1648 # about.
1652 # about.
1649 if f is not None:
1653 if f is not None:
1650 # Get the changenode this manifest belongs to
1654 # Get the changenode this manifest belongs to
1651 clnode = msng_mnfst_set[mnfstnode]
1655 clnode = msng_mnfst_set[mnfstnode]
1652 # Create the set of filenodes for the file if
1656 # Create the set of filenodes for the file if
1653 # there isn't one already.
1657 # there isn't one already.
1654 ndset = msng_filenode_set.setdefault(f, {})
1658 ndset = msng_filenode_set.setdefault(f, {})
1655 # And set the filenode's changelog node to the
1659 # And set the filenode's changelog node to the
1656 # manifest's if it hasn't been set already.
1660 # manifest's if it hasn't been set already.
1657 ndset.setdefault(fnode, clnode)
1661 ndset.setdefault(fnode, clnode)
1658 else:
1662 else:
1659 # Otherwise we need a full manifest.
1663 # Otherwise we need a full manifest.
1660 m = mnfst.read(mnfstnode)
1664 m = mnfst.read(mnfstnode)
1661 # For every file in we care about.
1665 # For every file in we care about.
1662 for f in changedfiles:
1666 for f in changedfiles:
1663 fnode = m.get(f, None)
1667 fnode = m.get(f, None)
1664 # If it's in the manifest
1668 # If it's in the manifest
1665 if fnode is not None:
1669 if fnode is not None:
1666 # See comments above.
1670 # See comments above.
1667 clnode = msng_mnfst_set[mnfstnode]
1671 clnode = msng_mnfst_set[mnfstnode]
1668 ndset = msng_filenode_set.setdefault(f, {})
1672 ndset = msng_filenode_set.setdefault(f, {})
1669 ndset.setdefault(fnode, clnode)
1673 ndset.setdefault(fnode, clnode)
1670 # Remember the revision we hope to see next.
1674 # Remember the revision we hope to see next.
1671 next_rev[0] = r + 1
1675 next_rev[0] = r + 1
1672 return collect_msng_filenodes
1676 return collect_msng_filenodes
1673
1677
1674 # We have a list of filenodes we think we need for a file, lets remove
1678 # We have a list of filenodes we think we need for a file, lets remove
1675 # all those we now the recipient must have.
1679 # all those we now the recipient must have.
1676 def prune_filenodes(f, filerevlog):
1680 def prune_filenodes(f, filerevlog):
1677 msngset = msng_filenode_set[f]
1681 msngset = msng_filenode_set[f]
1678 hasset = {}
1682 hasset = {}
1679 # If a 'missing' filenode thinks it belongs to a changenode we
1683 # If a 'missing' filenode thinks it belongs to a changenode we
1680 # assume the recipient must have, then the recipient must have
1684 # assume the recipient must have, then the recipient must have
1681 # that filenode.
1685 # that filenode.
1682 for n in msngset:
1686 for n in msngset:
1683 clnode = cl.node(filerevlog.linkrev(n))
1687 clnode = cl.node(filerevlog.linkrev(n))
1684 if clnode in has_cl_set:
1688 if clnode in has_cl_set:
1685 hasset[n] = 1
1689 hasset[n] = 1
1686 prune_parents(filerevlog, hasset, msngset)
1690 prune_parents(filerevlog, hasset, msngset)
1687
1691
1688 # A function generator function that sets up the a context for the
1692 # A function generator function that sets up the a context for the
1689 # inner function.
1693 # inner function.
1690 def lookup_filenode_link_func(fname):
1694 def lookup_filenode_link_func(fname):
1691 msngset = msng_filenode_set[fname]
1695 msngset = msng_filenode_set[fname]
1692 # Lookup the changenode the filenode belongs to.
1696 # Lookup the changenode the filenode belongs to.
1693 def lookup_filenode_link(fnode):
1697 def lookup_filenode_link(fnode):
1694 return msngset[fnode]
1698 return msngset[fnode]
1695 return lookup_filenode_link
1699 return lookup_filenode_link
1696
1700
1697 # Now that we have all theses utility functions to help out and
1701 # Now that we have all theses utility functions to help out and
1698 # logically divide up the task, generate the group.
1702 # logically divide up the task, generate the group.
1699 def gengroup():
1703 def gengroup():
1700 # The set of changed files starts empty.
1704 # The set of changed files starts empty.
1701 changedfiles = {}
1705 changedfiles = {}
1702 # Create a changenode group generator that will call our functions
1706 # Create a changenode group generator that will call our functions
1703 # back to lookup the owning changenode and collect information.
1707 # back to lookup the owning changenode and collect information.
1704 group = cl.group(msng_cl_lst, identity,
1708 group = cl.group(msng_cl_lst, identity,
1705 manifest_and_file_collector(changedfiles))
1709 manifest_and_file_collector(changedfiles))
1706 for chnk in group:
1710 for chnk in group:
1707 yield chnk
1711 yield chnk
1708
1712
1709 # The list of manifests has been collected by the generator
1713 # The list of manifests has been collected by the generator
1710 # calling our functions back.
1714 # calling our functions back.
1711 prune_manifests()
1715 prune_manifests()
1712 msng_mnfst_lst = msng_mnfst_set.keys()
1716 msng_mnfst_lst = msng_mnfst_set.keys()
1713 # Sort the manifestnodes by revision number.
1717 # Sort the manifestnodes by revision number.
1714 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1718 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1715 # Create a generator for the manifestnodes that calls our lookup
1719 # Create a generator for the manifestnodes that calls our lookup
1716 # and data collection functions back.
1720 # and data collection functions back.
1717 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1721 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1718 filenode_collector(changedfiles))
1722 filenode_collector(changedfiles))
1719 for chnk in group:
1723 for chnk in group:
1720 yield chnk
1724 yield chnk
1721
1725
1722 # These are no longer needed, dereference and toss the memory for
1726 # These are no longer needed, dereference and toss the memory for
1723 # them.
1727 # them.
1724 msng_mnfst_lst = None
1728 msng_mnfst_lst = None
1725 msng_mnfst_set.clear()
1729 msng_mnfst_set.clear()
1726
1730
1727 changedfiles = changedfiles.keys()
1731 changedfiles = changedfiles.keys()
1728 changedfiles.sort()
1732 changedfiles.sort()
1729 # Go through all our files in order sorted by name.
1733 # Go through all our files in order sorted by name.
1730 for fname in changedfiles:
1734 for fname in changedfiles:
1731 filerevlog = self.file(fname)
1735 filerevlog = self.file(fname)
1732 # Toss out the filenodes that the recipient isn't really
1736 # Toss out the filenodes that the recipient isn't really
1733 # missing.
1737 # missing.
1734 if msng_filenode_set.has_key(fname):
1738 if msng_filenode_set.has_key(fname):
1735 prune_filenodes(fname, filerevlog)
1739 prune_filenodes(fname, filerevlog)
1736 msng_filenode_lst = msng_filenode_set[fname].keys()
1740 msng_filenode_lst = msng_filenode_set[fname].keys()
1737 else:
1741 else:
1738 msng_filenode_lst = []
1742 msng_filenode_lst = []
1739 # If any filenodes are left, generate the group for them,
1743 # If any filenodes are left, generate the group for them,
1740 # otherwise don't bother.
1744 # otherwise don't bother.
1741 if len(msng_filenode_lst) > 0:
1745 if len(msng_filenode_lst) > 0:
1742 yield changegroup.genchunk(fname)
1746 yield changegroup.genchunk(fname)
1743 # Sort the filenodes by their revision #
1747 # Sort the filenodes by their revision #
1744 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1748 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1745 # Create a group generator and only pass in a changenode
1749 # Create a group generator and only pass in a changenode
1746 # lookup function as we need to collect no information
1750 # lookup function as we need to collect no information
1747 # from filenodes.
1751 # from filenodes.
1748 group = filerevlog.group(msng_filenode_lst,
1752 group = filerevlog.group(msng_filenode_lst,
1749 lookup_filenode_link_func(fname))
1753 lookup_filenode_link_func(fname))
1750 for chnk in group:
1754 for chnk in group:
1751 yield chnk
1755 yield chnk
1752 if msng_filenode_set.has_key(fname):
1756 if msng_filenode_set.has_key(fname):
1753 # Don't need this anymore, toss it to free memory.
1757 # Don't need this anymore, toss it to free memory.
1754 del msng_filenode_set[fname]
1758 del msng_filenode_set[fname]
1755 # Signal that no more groups are left.
1759 # Signal that no more groups are left.
1756 yield changegroup.closechunk()
1760 yield changegroup.closechunk()
1757
1761
1758 if msng_cl_lst:
1762 if msng_cl_lst:
1759 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1763 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1760
1764
1761 return util.chunkbuffer(gengroup())
1765 return util.chunkbuffer(gengroup())
1762
1766
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes the recipient already has; everything
        descending from them is bundled.
        source: opaque tag passed to the 'preoutgoing'/'outgoing' hooks.
        Returns a util.chunkbuffer wrapping the chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All outgoing changelog nodes, plus the set of their revision
        # numbers (used below to filter manifest/file revisions by linkrev).
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # Changelog nodes are their own link nodes.
            return x

        def gennodelst(revlog):
            # Yield, in revision order, the nodes of revlog whose linked
            # changeset is among the outgoing ones.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that, as a side effect of streaming the
            # changelog, records every file touched by an outgoing changeset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a lookup mapping a node of revlog to the changelog node
            # whose revision introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changelog chunks go out first; the collector callback fills
            # changedfiles while the group is being generated.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifest chunks.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one group per changed file, in sorted name order.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # Announce the file name, then stream its revisions.
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # An empty chunk terminates the whole stream.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1829
1833
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks (read via changegroup.chunkiter
        / changegroup.getchunk).
        srctype, url: passed through to the changegroup-related hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback for incoming changesets: each new changeset
            # links to its own (next-to-be-added) revision number.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # linkrev callback for manifest/file entries: map the changelog
            # node they belong to back to its revision number.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # last rev before the group is added
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # last rev after the group is added
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            # Each file group is preceded by a chunk carrying the file
            # name; an empty chunk marks the end of the stream.
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # Always discard the appendfile temporaries, whether or not
            # writedata ran.
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction (throw=True).
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1936
1940
1937
1941
1938 def stream_in(self, remote):
1942 def stream_in(self, remote):
1939 fp = remote.stream_out()
1943 fp = remote.stream_out()
1940 l = fp.readline()
1944 l = fp.readline()
1941 try:
1945 try:
1942 resp = int(l)
1946 resp = int(l)
1943 except ValueError:
1947 except ValueError:
1944 raise util.UnexpectedOutput(
1948 raise util.UnexpectedOutput(
1945 _('Unexpected response from remote server:'), l)
1949 _('Unexpected response from remote server:'), l)
1946 if resp == 1:
1950 if resp == 1:
1947 raise util.Abort(_('operation forbidden by server'))
1951 raise util.Abort(_('operation forbidden by server'))
1948 elif resp == 2:
1952 elif resp == 2:
1949 raise util.Abort(_('locking the remote repository failed'))
1953 raise util.Abort(_('locking the remote repository failed'))
1950 elif resp != 0:
1954 elif resp != 0:
1951 raise util.Abort(_('the server sent an unknown error code'))
1955 raise util.Abort(_('the server sent an unknown error code'))
1952 self.ui.status(_('streaming all changes\n'))
1956 self.ui.status(_('streaming all changes\n'))
1953 l = fp.readline()
1957 l = fp.readline()
1954 try:
1958 try:
1955 total_files, total_bytes = map(int, l.split(' ', 1))
1959 total_files, total_bytes = map(int, l.split(' ', 1))
1956 except ValueError, TypeError:
1960 except ValueError, TypeError:
1957 raise util.UnexpectedOutput(
1961 raise util.UnexpectedOutput(
1958 _('Unexpected response from remote server:'), l)
1962 _('Unexpected response from remote server:'), l)
1959 self.ui.status(_('%d files to transfer, %s of data\n') %
1963 self.ui.status(_('%d files to transfer, %s of data\n') %
1960 (total_files, util.bytecount(total_bytes)))
1964 (total_files, util.bytecount(total_bytes)))
1961 start = time.time()
1965 start = time.time()
1962 for i in xrange(total_files):
1966 for i in xrange(total_files):
1963 # XXX doesn't support '\n' or '\r' in filenames
1967 # XXX doesn't support '\n' or '\r' in filenames
1964 l = fp.readline()
1968 l = fp.readline()
1965 try:
1969 try:
1966 name, size = l.split('\0', 1)
1970 name, size = l.split('\0', 1)
1967 size = int(size)
1971 size = int(size)
1968 except ValueError, TypeError:
1972 except ValueError, TypeError:
1969 raise util.UnexpectedOutput(
1973 raise util.UnexpectedOutput(
1970 _('Unexpected response from remote server:'), l)
1974 _('Unexpected response from remote server:'), l)
1971 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1975 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1972 ofp = self.sopener(name, 'w')
1976 ofp = self.sopener(name, 'w')
1973 for chunk in util.filechunkiter(fp, limit=size):
1977 for chunk in util.filechunkiter(fp, limit=size):
1974 ofp.write(chunk)
1978 ofp.write(chunk)
1975 ofp.close()
1979 ofp.close()
1976 elapsed = time.time() - start
1980 elapsed = time.time() - start
1977 if elapsed <= 0:
1981 if elapsed <= 0:
1978 elapsed = 0.001
1982 elapsed = 0.001
1979 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1983 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1980 (util.bytecount(total_bytes), elapsed,
1984 (util.bytecount(total_bytes), elapsed,
1981 util.bytecount(total_bytes / elapsed)))
1985 util.bytecount(total_bytes / elapsed)))
1982 self.reload()
1986 self.reload()
1983 return len(self.heads()) + 1
1987 return len(self.heads()) + 1
1984
1988
1985 def clone(self, remote, heads=[], stream=False):
1989 def clone(self, remote, heads=[], stream=False):
1986 '''clone remote repository.
1990 '''clone remote repository.
1987
1991
1988 keyword arguments:
1992 keyword arguments:
1989 heads: list of revs to clone (forces use of pull)
1993 heads: list of revs to clone (forces use of pull)
1990 stream: use streaming clone if possible'''
1994 stream: use streaming clone if possible'''
1991
1995
1992 # now, all clients that can request uncompressed clones can
1996 # now, all clients that can request uncompressed clones can
1993 # read repo formats supported by all servers that can serve
1997 # read repo formats supported by all servers that can serve
1994 # them.
1998 # them.
1995
1999
1996 # if revlog format changes, client will have to check version
2000 # if revlog format changes, client will have to check version
1997 # and format flags on "stream" capability, and use
2001 # and format flags on "stream" capability, and use
1998 # uncompressed only if compatible.
2002 # uncompressed only if compatible.
1999
2003
2000 if stream and not heads and remote.capable('stream'):
2004 if stream and not heads and remote.capable('stream'):
2001 return self.stream_in(remote)
2005 return self.stream_in(remote)
2002 return self.pull(remote, heads)
2006 return self.pull(remote, heads)
2003
2007
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair via util.rename.

    The pairs are copied into fresh tuples up front so the returned closure
    holds no reference back to the caller's structures.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for origin, target in pending:
            util.rename(origin, target)
    return a
2011
2015
def instance(ui, path, create):
    """repo.repository entry point: open or create a local repo at path.

    Strips a leading 'file:' scheme before handing the path to
    localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2014
2018
def islocal(path):
    """Repositories handled by this module always live on local disk."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now