##// END OF EJS Templates
tags: fix abababa case, with test case
Matt Mackall -
r4266:fe7f38dd default
parent child Browse files
Show More
@@ -1,2015 +1,2015
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    # wire-protocol capabilities this repository implementation advertises
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk format requirements this implementation knows how to read
    supported = ('revlogv1', 'store')
20
20
21 def __del__(self):
21 def __del__(self):
22 self.transhandle = None
22 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 if not path:
25 if not path:
26 p = os.getcwd()
26 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
28 oldp = p
29 p = os.path.dirname(p)
29 p = os.path.dirname(p)
30 if p == oldp:
30 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
32 " here (.hg not found)"))
33 path = p
33 path = p
34
34
35 self.root = os.path.realpath(path)
35 self.root = os.path.realpath(path)
36 self.path = os.path.join(self.root, ".hg")
36 self.path = os.path.join(self.root, ".hg")
37 self.origroot = path
37 self.origroot = path
38 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 v = self.ui.configrevlog()
91 v = self.ui.configrevlog()
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 fl = v.get('flags', None)
94 fl = v.get('flags', None)
95 flags = 0
95 flags = 0
96 if fl != None:
96 if fl != None:
97 for x in fl.split():
97 for x in fl.split():
98 flags |= revlog.flagstr(x)
98 flags |= revlog.flagstr(x)
99 elif self.revlogv1:
99 elif self.revlogv1:
100 flags = revlog.REVLOG_DEFAULT_FLAGS
100 flags = revlog.REVLOG_DEFAULT_FLAGS
101
101
102 v = self.revlogversion | flags
102 v = self.revlogversion | flags
103 self.manifest = manifest.manifest(self.sopener, v)
103 self.manifest = manifest.manifest(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
105
105
106 fallback = self.ui.config('ui', 'fallbackencoding')
106 fallback = self.ui.config('ui', 'fallbackencoding')
107 if fallback:
107 if fallback:
108 util._fallbackencoding = fallback
108 util._fallbackencoding = fallback
109
109
110 # the changelog might not have the inline index flag
110 # the changelog might not have the inline index flag
111 # on. If the format of the changelog is the same as found in
111 # on. If the format of the changelog is the same as found in
112 # .hgrc, apply any flags found in the .hgrc as well.
112 # .hgrc, apply any flags found in the .hgrc as well.
113 # Otherwise, just version from the changelog
113 # Otherwise, just version from the changelog
114 v = self.changelog.version
114 v = self.changelog.version
115 if v == self.revlogversion:
115 if v == self.revlogversion:
116 v |= flags
116 v |= flags
117 self.revlogversion = v
117 self.revlogversion = v
118
118
119 self.tagscache = None
119 self.tagscache = None
120 self.branchcache = None
120 self.branchcache = None
121 self.nodetagscache = None
121 self.nodetagscache = None
122 self.encodepats = None
122 self.encodepats = None
123 self.decodepats = None
123 self.decodepats = None
124 self.transhandle = None
124 self.transhandle = None
125
125
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def url(self):
128 def url(self):
129 return 'file:' + self.root
129 return 'file:' + self.root
130
130
131 def hook(self, name, throw=False, **args):
131 def hook(self, name, throw=False, **args):
132 def callhook(hname, funcname):
132 def callhook(hname, funcname):
133 '''call python hook. hook is callable object, looked up as
133 '''call python hook. hook is callable object, looked up as
134 name in python module. if callable returns "true", hook
134 name in python module. if callable returns "true", hook
135 fails, else passes. if hook raises exception, treated as
135 fails, else passes. if hook raises exception, treated as
136 hook failure. exception propagates if throw is "true".
136 hook failure. exception propagates if throw is "true".
137
137
138 reason for "true" meaning "hook failed" is so that
138 reason for "true" meaning "hook failed" is so that
139 unmodified commands (e.g. mercurial.commands.update) can
139 unmodified commands (e.g. mercurial.commands.update) can
140 be run as hooks without wrappers to convert return values.'''
140 be run as hooks without wrappers to convert return values.'''
141
141
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 d = funcname.rfind('.')
143 d = funcname.rfind('.')
144 if d == -1:
144 if d == -1:
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 % (hname, funcname))
146 % (hname, funcname))
147 modname = funcname[:d]
147 modname = funcname[:d]
148 try:
148 try:
149 obj = __import__(modname)
149 obj = __import__(modname)
150 except ImportError:
150 except ImportError:
151 try:
151 try:
152 # extensions are loaded with hgext_ prefix
152 # extensions are loaded with hgext_ prefix
153 obj = __import__("hgext_%s" % modname)
153 obj = __import__("hgext_%s" % modname)
154 except ImportError:
154 except ImportError:
155 raise util.Abort(_('%s hook is invalid '
155 raise util.Abort(_('%s hook is invalid '
156 '(import of "%s" failed)') %
156 '(import of "%s" failed)') %
157 (hname, modname))
157 (hname, modname))
158 try:
158 try:
159 for p in funcname.split('.')[1:]:
159 for p in funcname.split('.')[1:]:
160 obj = getattr(obj, p)
160 obj = getattr(obj, p)
161 except AttributeError, err:
161 except AttributeError, err:
162 raise util.Abort(_('%s hook is invalid '
162 raise util.Abort(_('%s hook is invalid '
163 '("%s" is not defined)') %
163 '("%s" is not defined)') %
164 (hname, funcname))
164 (hname, funcname))
165 if not callable(obj):
165 if not callable(obj):
166 raise util.Abort(_('%s hook is invalid '
166 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not callable)') %
167 '("%s" is not callable)') %
168 (hname, funcname))
168 (hname, funcname))
169 try:
169 try:
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 except (KeyboardInterrupt, util.SignalInterrupt):
171 except (KeyboardInterrupt, util.SignalInterrupt):
172 raise
172 raise
173 except Exception, exc:
173 except Exception, exc:
174 if isinstance(exc, util.Abort):
174 if isinstance(exc, util.Abort):
175 self.ui.warn(_('error: %s hook failed: %s\n') %
175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 (hname, exc.args[0]))
176 (hname, exc.args[0]))
177 else:
177 else:
178 self.ui.warn(_('error: %s hook raised an exception: '
178 self.ui.warn(_('error: %s hook raised an exception: '
179 '%s\n') % (hname, exc))
179 '%s\n') % (hname, exc))
180 if throw:
180 if throw:
181 raise
181 raise
182 self.ui.print_exc()
182 self.ui.print_exc()
183 return True
183 return True
184 if r:
184 if r:
185 if throw:
185 if throw:
186 raise util.Abort(_('%s hook failed') % hname)
186 raise util.Abort(_('%s hook failed') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 return r
188 return r
189
189
190 def runhook(name, cmd):
190 def runhook(name, cmd):
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 r = util.system(cmd, environ=env, cwd=self.root)
193 r = util.system(cmd, environ=env, cwd=self.root)
194 if r:
194 if r:
195 desc, r = util.explain_exit(r)
195 desc, r = util.explain_exit(r)
196 if throw:
196 if throw:
197 raise util.Abort(_('%s hook %s') % (name, desc))
197 raise util.Abort(_('%s hook %s') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 return r
199 return r
200
200
201 r = False
201 r = False
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 if hname.split(".", 1)[0] == name and cmd]
203 if hname.split(".", 1)[0] == name and cmd]
204 hooks.sort()
204 hooks.sort()
205 for hname, cmd in hooks:
205 for hname, cmd in hooks:
206 if cmd.startswith('python:'):
206 if cmd.startswith('python:'):
207 r = callhook(hname, cmd[7:].strip()) or r
207 r = callhook(hname, cmd[7:].strip()) or r
208 else:
208 else:
209 r = runhook(hname, cmd) or r
209 r = runhook(hname, cmd) or r
210 return r
210 return r
211
211
212 tag_disallowed = ':\r\n'
212 tag_disallowed = ':\r\n'
213
213
214 def tag(self, name, node, message, local, user, date):
214 def tag(self, name, node, message, local, user, date):
215 '''tag a revision with a symbolic name.
215 '''tag a revision with a symbolic name.
216
216
217 if local is True, the tag is stored in a per-repository file.
217 if local is True, the tag is stored in a per-repository file.
218 otherwise, it is stored in the .hgtags file, and a new
218 otherwise, it is stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tag in non-version-controlled file
223 local: whether to store tag in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for c in self.tag_disallowed:
232 for c in self.tag_disallowed:
233 if c in name:
233 if c in name:
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235
235
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237
237
238 if local:
238 if local:
239 # local tags are stored in the current charset
239 # local tags are stored in the current charset
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.hook('tag', node=hex(node), tag=name, local=local)
241 self.hook('tag', node=hex(node), tag=name, local=local)
242 return
242 return
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 # committed tags are stored in UTF-8
249 # committed tags are stored in UTF-8
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 self.wfile('.hgtags', 'ab').write(line)
251 self.wfile('.hgtags', 'ab').write(line)
252 if self.dirstate.state('.hgtags') == '?':
252 if self.dirstate.state('.hgtags') == '?':
253 self.add(['.hgtags'])
253 self.add(['.hgtags'])
254
254
255 self.commit(['.hgtags'], message, user, date)
255 self.commit(['.hgtags'], message, user, date)
256 self.hook('tag', node=hex(node), tag=name, local=local)
256 self.hook('tag', node=hex(node), tag=name, local=local)
257
257
258 def tags(self):
258 def tags(self):
259 '''return a mapping of tag to node'''
259 '''return a mapping of tag to node'''
260 if self.tagscache:
260 if self.tagscache:
261 return self.tagscache
261 return self.tagscache
262
262
263 globaltags = {}
263 globaltags = {}
264
264
265 def readtags(lines, fn):
265 def readtags(lines, fn):
266 filetags = {}
266 filetags = {}
267 count = 0
267 count = 0
268
268
269 def warn(msg):
269 def warn(msg):
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271
271
272 for l in lines:
272 for l in lines:
273 count += 1
273 count += 1
274 if not l:
274 if not l:
275 continue
275 continue
276 s = l.split(" ", 1)
276 s = l.split(" ", 1)
277 if len(s) != 2:
277 if len(s) != 2:
278 warn(_("cannot parse entry"))
278 warn(_("cannot parse entry"))
279 continue
279 continue
280 node, key = s
280 node, key = s
281 key = util.tolocal(key.strip()) # stored in UTF-8
281 key = util.tolocal(key.strip()) # stored in UTF-8
282 try:
282 try:
283 bin_n = bin(node)
283 bin_n = bin(node)
284 except TypeError:
284 except TypeError:
285 warn(_("node '%s' is not well formed") % node)
285 warn(_("node '%s' is not well formed") % node)
286 continue
286 continue
287 if bin_n not in self.changelog.nodemap:
287 if bin_n not in self.changelog.nodemap:
288 warn(_("tag '%s' refers to unknown node") % key)
288 warn(_("tag '%s' refers to unknown node") % key)
289 continue
289 continue
290
290
291 h = {}
291 h = []
292 if key in filetags:
292 if key in filetags:
293 n, h = filetags[key]
293 n, h = filetags[key]
294 h[n] = True
294 h.append(n)
295 filetags[key] = (bin_n, h)
295 filetags[key] = (bin_n, h)
296
296
297 for k,nh in filetags.items():
297 for k,nh in filetags.items():
298 if k not in globaltags:
298 if k not in globaltags:
299 globaltags[k] = nh
299 globaltags[k] = nh
300 continue
300 continue
301 # we prefer the global tag if:
301 # we prefer the global tag if:
302 # it supercedes us OR
302 # it supercedes us OR
303 # mutual supercedes and it has a higher rank
303 # mutual supercedes and it has a higher rank
304 # otherwise we win because we're tip-most
304 # otherwise we win because we're tip-most
305 an, ah = nh
305 an, ah = nh
306 bn, bh = globaltags[k]
306 bn, bh = globaltags[k]
307 if bn != an and an in bh and \
307 if bn != an and an in bh and \
308 (bn not in ah or len(bh) > len(ah)):
308 (bn not in ah or len(bh) > len(ah)):
309 an = bn
309 an = bn
310 ah.update(bh)
310 ah.append([n for n in bh if n not in ah])
311 globaltags[k] = an, ah
311 globaltags[k] = an, ah
312
312
313 # read the tags file from each head, ending with the tip
313 # read the tags file from each head, ending with the tip
314 f = None
314 f = None
315 for rev, node, fnode in self._hgtagsnodes():
315 for rev, node, fnode in self._hgtagsnodes():
316 f = (f and f.filectx(fnode) or
316 f = (f and f.filectx(fnode) or
317 self.filectx('.hgtags', fileid=fnode))
317 self.filectx('.hgtags', fileid=fnode))
318 readtags(f.data().splitlines(), f)
318 readtags(f.data().splitlines(), f)
319
319
320 try:
320 try:
321 data = util.fromlocal(self.opener("localtags").read())
321 data = util.fromlocal(self.opener("localtags").read())
322 # localtags are stored in the local character set
322 # localtags are stored in the local character set
323 # while the internal tag table is stored in UTF-8
323 # while the internal tag table is stored in UTF-8
324 readtags(data.splitlines(), "localtags")
324 readtags(data.splitlines(), "localtags")
325 except IOError:
325 except IOError:
326 pass
326 pass
327
327
328 self.tagscache = {}
328 self.tagscache = {}
329 for k,nh in globaltags.items():
329 for k,nh in globaltags.items():
330 n = nh[0]
330 n = nh[0]
331 if n != nullid:
331 if n != nullid:
332 self.tagscache[k] = n
332 self.tagscache[k] = n
333 self.tagscache['tip'] = self.changelog.tip()
333 self.tagscache['tip'] = self.changelog.tip()
334
334
335 return self.tagscache
335 return self.tagscache
336
336
337 def _hgtagsnodes(self):
337 def _hgtagsnodes(self):
338 heads = self.heads()
338 heads = self.heads()
339 heads.reverse()
339 heads.reverse()
340 last = {}
340 last = {}
341 ret = []
341 ret = []
342 for node in heads:
342 for node in heads:
343 c = self.changectx(node)
343 c = self.changectx(node)
344 rev = c.rev()
344 rev = c.rev()
345 try:
345 try:
346 fnode = c.filenode('.hgtags')
346 fnode = c.filenode('.hgtags')
347 except repo.LookupError:
347 except repo.LookupError:
348 continue
348 continue
349 ret.append((rev, node, fnode))
349 ret.append((rev, node, fnode))
350 if fnode in last:
350 if fnode in last:
351 ret[last[fnode]] = None
351 ret[last[fnode]] = None
352 last[fnode] = len(ret) - 1
352 last[fnode] = len(ret) - 1
353 return [item for item in ret if item]
353 return [item for item in ret if item]
354
354
355 def tagslist(self):
355 def tagslist(self):
356 '''return a list of tags ordered by revision'''
356 '''return a list of tags ordered by revision'''
357 l = []
357 l = []
358 for t, n in self.tags().items():
358 for t, n in self.tags().items():
359 try:
359 try:
360 r = self.changelog.rev(n)
360 r = self.changelog.rev(n)
361 except:
361 except:
362 r = -2 # sort to the beginning of the list if unknown
362 r = -2 # sort to the beginning of the list if unknown
363 l.append((r, t, n))
363 l.append((r, t, n))
364 l.sort()
364 l.sort()
365 return [(t, n) for r, t, n in l]
365 return [(t, n) for r, t, n in l]
366
366
367 def nodetags(self, node):
367 def nodetags(self, node):
368 '''return the tags associated with a node'''
368 '''return the tags associated with a node'''
369 if not self.nodetagscache:
369 if not self.nodetagscache:
370 self.nodetagscache = {}
370 self.nodetagscache = {}
371 for t, n in self.tags().items():
371 for t, n in self.tags().items():
372 self.nodetagscache.setdefault(n, []).append(t)
372 self.nodetagscache.setdefault(n, []).append(t)
373 return self.nodetagscache.get(node, [])
373 return self.nodetagscache.get(node, [])
374
374
375 def _branchtags(self):
375 def _branchtags(self):
376 partial, last, lrev = self._readbranchcache()
376 partial, last, lrev = self._readbranchcache()
377
377
378 tiprev = self.changelog.count() - 1
378 tiprev = self.changelog.count() - 1
379 if lrev != tiprev:
379 if lrev != tiprev:
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382
382
383 return partial
383 return partial
384
384
385 def branchtags(self):
385 def branchtags(self):
386 if self.branchcache is not None:
386 if self.branchcache is not None:
387 return self.branchcache
387 return self.branchcache
388
388
389 self.branchcache = {} # avoid recursion in changectx
389 self.branchcache = {} # avoid recursion in changectx
390 partial = self._branchtags()
390 partial = self._branchtags()
391
391
392 # the branch cache is stored on disk as UTF-8, but in the local
392 # the branch cache is stored on disk as UTF-8, but in the local
393 # charset internally
393 # charset internally
394 for k, v in partial.items():
394 for k, v in partial.items():
395 self.branchcache[util.tolocal(k)] = v
395 self.branchcache[util.tolocal(k)] = v
396 return self.branchcache
396 return self.branchcache
397
397
398 def _readbranchcache(self):
398 def _readbranchcache(self):
399 partial = {}
399 partial = {}
400 try:
400 try:
401 f = self.opener("branch.cache")
401 f = self.opener("branch.cache")
402 lines = f.read().split('\n')
402 lines = f.read().split('\n')
403 f.close()
403 f.close()
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w")
424 f = self.opener("branch.cache", "w")
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 except IOError:
428 except IOError:
429 pass
429 pass
430
430
431 def _updatebranchcache(self, partial, start, end):
431 def _updatebranchcache(self, partial, start, end):
432 for r in xrange(start, end):
432 for r in xrange(start, end):
433 c = self.changectx(r)
433 c = self.changectx(r)
434 b = c.branch()
434 b = c.branch()
435 partial[b] = c.node()
435 partial[b] = c.node()
436
436
437 def lookup(self, key):
437 def lookup(self, key):
438 if key == '.':
438 if key == '.':
439 key = self.dirstate.parents()[0]
439 key = self.dirstate.parents()[0]
440 if key == nullid:
440 if key == nullid:
441 raise repo.RepoError(_("no revision checked out"))
441 raise repo.RepoError(_("no revision checked out"))
442 elif key == 'null':
442 elif key == 'null':
443 return nullid
443 return nullid
444 n = self.changelog._match(key)
444 n = self.changelog._match(key)
445 if n:
445 if n:
446 return n
446 return n
447 if key in self.tags():
447 if key in self.tags():
448 return self.tags()[key]
448 return self.tags()[key]
449 if key in self.branchtags():
449 if key in self.branchtags():
450 return self.branchtags()[key]
450 return self.branchtags()[key]
451 n = self.changelog._partialmatch(key)
451 n = self.changelog._partialmatch(key)
452 if n:
452 if n:
453 return n
453 return n
454 raise repo.RepoError(_("unknown revision '%s'") % key)
454 raise repo.RepoError(_("unknown revision '%s'") % key)
455
455
456 def dev(self):
456 def dev(self):
457 return os.lstat(self.path).st_dev
457 return os.lstat(self.path).st_dev
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def sjoin(self, f):
465 def sjoin(self, f):
466 f = self.encodefn(f)
466 f = self.encodefn(f)
467 return os.path.join(self.spath, f)
467 return os.path.join(self.spath, f)
468
468
469 def wjoin(self, f):
469 def wjoin(self, f):
470 return os.path.join(self.root, f)
470 return os.path.join(self.root, f)
471
471
472 def file(self, f):
472 def file(self, f):
473 if f[0] == '/':
473 if f[0] == '/':
474 f = f[1:]
474 f = f[1:]
475 return filelog.filelog(self.sopener, f, self.revlogversion)
475 return filelog.filelog(self.sopener, f, self.revlogversion)
476
476
477 def changectx(self, changeid=None):
477 def changectx(self, changeid=None):
478 return context.changectx(self, changeid)
478 return context.changectx(self, changeid)
479
479
480 def workingctx(self):
480 def workingctx(self):
481 return context.workingctx(self)
481 return context.workingctx(self)
482
482
483 def parents(self, changeid=None):
483 def parents(self, changeid=None):
484 '''
484 '''
485 get list of changectxs for parents of changeid or working directory
485 get list of changectxs for parents of changeid or working directory
486 '''
486 '''
487 if changeid is None:
487 if changeid is None:
488 pl = self.dirstate.parents()
488 pl = self.dirstate.parents()
489 else:
489 else:
490 n = self.changelog.lookup(changeid)
490 n = self.changelog.lookup(changeid)
491 pl = self.changelog.parents(n)
491 pl = self.changelog.parents(n)
492 if pl[1] == nullid:
492 if pl[1] == nullid:
493 return [self.changectx(pl[0])]
493 return [self.changectx(pl[0])]
494 return [self.changectx(pl[0]), self.changectx(pl[1])]
494 return [self.changectx(pl[0]), self.changectx(pl[1])]
495
495
496 def filectx(self, path, changeid=None, fileid=None):
496 def filectx(self, path, changeid=None, fileid=None):
497 """changeid can be a changeset revision, node, or tag.
497 """changeid can be a changeset revision, node, or tag.
498 fileid can be a file revision or node."""
498 fileid can be a file revision or node."""
499 return context.filectx(self, path, changeid, fileid)
499 return context.filectx(self, path, changeid, fileid)
500
500
501 def getcwd(self):
501 def getcwd(self):
502 return self.dirstate.getcwd()
502 return self.dirstate.getcwd()
503
503
504 def wfile(self, f, mode='r'):
504 def wfile(self, f, mode='r'):
505 return self.wopener(f, mode)
505 return self.wopener(f, mode)
506
506
507 def wread(self, filename):
507 def wread(self, filename):
508 if self.encodepats == None:
508 if self.encodepats == None:
509 l = []
509 l = []
510 for pat, cmd in self.ui.configitems("encode"):
510 for pat, cmd in self.ui.configitems("encode"):
511 mf = util.matcher(self.root, "", [pat], [], [])[1]
511 mf = util.matcher(self.root, "", [pat], [], [])[1]
512 l.append((mf, cmd))
512 l.append((mf, cmd))
513 self.encodepats = l
513 self.encodepats = l
514
514
515 data = self.wopener(filename, 'r').read()
515 data = self.wopener(filename, 'r').read()
516
516
517 for mf, cmd in self.encodepats:
517 for mf, cmd in self.encodepats:
518 if mf(filename):
518 if mf(filename):
519 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
519 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
520 data = util.filter(data, cmd)
520 data = util.filter(data, cmd)
521 break
521 break
522
522
523 return data
523 return data
524
524
525 def wwrite(self, filename, data, fd=None):
525 def wwrite(self, filename, data, fd=None):
526 if self.decodepats == None:
526 if self.decodepats == None:
527 l = []
527 l = []
528 for pat, cmd in self.ui.configitems("decode"):
528 for pat, cmd in self.ui.configitems("decode"):
529 mf = util.matcher(self.root, "", [pat], [], [])[1]
529 mf = util.matcher(self.root, "", [pat], [], [])[1]
530 l.append((mf, cmd))
530 l.append((mf, cmd))
531 self.decodepats = l
531 self.decodepats = l
532
532
533 for mf, cmd in self.decodepats:
533 for mf, cmd in self.decodepats:
534 if mf(filename):
534 if mf(filename):
535 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
535 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
536 data = util.filter(data, cmd)
536 data = util.filter(data, cmd)
537 break
537 break
538
538
539 if fd:
539 if fd:
540 return fd.write(data)
540 return fd.write(data)
541 return self.wopener(filename, 'w').write(data)
541 return self.wopener(filename, 'w').write(data)
542
542
    def transaction(self):
        """Return a new transaction on the store, or a nested handle if
        one is already running.

        A copy of the current dirstate is saved as journal.dirstate so
        rollback() can restore it; on close, the journal files are renamed
        to undo/undo.dirstate by aftertrans.
        """
        tr = self.transhandle
        # reuse an in-progress transaction instead of starting a new one
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo); save an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
562
562
563 def recover(self):
563 def recover(self):
564 l = self.lock()
564 l = self.lock()
565 if os.path.exists(self.sjoin("journal")):
565 if os.path.exists(self.sjoin("journal")):
566 self.ui.status(_("rolling back interrupted transaction\n"))
566 self.ui.status(_("rolling back interrupted transaction\n"))
567 transaction.rollback(self.sopener, self.sjoin("journal"))
567 transaction.rollback(self.sopener, self.sjoin("journal"))
568 self.reload()
568 self.reload()
569 return True
569 return True
570 else:
570 else:
571 self.ui.warn(_("no interrupted transaction available\n"))
571 self.ui.warn(_("no interrupted transaction available\n"))
572 return False
572 return False
573
573
574 def rollback(self, wlock=None):
574 def rollback(self, wlock=None):
575 if not wlock:
575 if not wlock:
576 wlock = self.wlock()
576 wlock = self.wlock()
577 l = self.lock()
577 l = self.lock()
578 if os.path.exists(self.sjoin("undo")):
578 if os.path.exists(self.sjoin("undo")):
579 self.ui.status(_("rolling back last transaction\n"))
579 self.ui.status(_("rolling back last transaction\n"))
580 transaction.rollback(self.sopener, self.sjoin("undo"))
580 transaction.rollback(self.sopener, self.sjoin("undo"))
581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
582 self.reload()
582 self.reload()
583 self.wreload()
583 self.wreload()
584 else:
584 else:
585 self.ui.warn(_("no rollback information available\n"))
585 self.ui.warn(_("no rollback information available\n"))
586
586
    def wreload(self):
        # re-read the dirstate from disk (working directory state)
        self.dirstate.read()
589
589
    def reload(self):
        # re-read changelog and manifest from disk and drop the tag
        # caches so they get rebuilt on next access
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
595
595
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file at lockname and return the lock object.

        First tries a non-blocking acquire.  If the lock is held:
        raise lock.LockHeld when wait is false, otherwise warn and retry
        with the timeout from ui.timeout (default 600 seconds).
        acquirefn, if given, runs once the lock is obtained; releasefn is
        passed through to lock.lock.
        """
        try:
            # non-blocking attempt (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
611
611
    def lock(self, wait=1):
        # store lock; reloads changelog/manifest caches once acquired
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
615
615
    def wlock(self, wait=1):
        # working-directory lock; dirstate is written back on release and
        # re-read on acquire
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
620
620
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn's parents come from manifest1/manifest2.  If the file content
        is unchanged against the resolved parent, the existing filelog
        node is returned; otherwise the file is added to the filelog,
        appended to changelist, and the new node returned.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # copy data replaces the first parent
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
680
680
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit files with explicit parents, bypassing dirstate-based
        file discovery; parents default to the dirstate parents."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
686
686
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit the given files (or everything changed) and return the
        new changeset node, or None if nothing was committed.

        When p1 is None the dirstate parents are used and dirstate is
        updated afterwards (the normal commit path); otherwise this is a
        rawcommit-style call against explicit parents.  An editor is
        launched when no text is given or force_editor is set.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        # copy: the default {} must never be mutated across calls
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move dirstate parents if we were already on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
826
826
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        With a node, files are taken from that revision's manifest;
        entries in files that match nothing are reported via badmatch
        or warned about.
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe despite iterating fdict: we break right away
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict matched no manifest entry
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
860
860
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        # compare working-dir contents of fn against the version in mf
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        # manifest of a node, restricted to files accepted by match
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # best effort: fall back to lockless operation
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # record as clean only if we hold the wlock
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # "" in mf2 marks a working-dir pseudo-entry whose
                    # contents must be compared by hand
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left over in mf1 was not in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
957
957
958 def add(self, list, wlock=None):
958 def add(self, list, wlock=None):
959 if not wlock:
959 if not wlock:
960 wlock = self.wlock()
960 wlock = self.wlock()
961 for f in list:
961 for f in list:
962 p = self.wjoin(f)
962 p = self.wjoin(f)
963 if not os.path.exists(p):
963 if not os.path.exists(p):
964 self.ui.warn(_("%s does not exist!\n") % f)
964 self.ui.warn(_("%s does not exist!\n") % f)
965 elif not os.path.isfile(p):
965 elif not os.path.isfile(p):
966 self.ui.warn(_("%s not added: only files supported currently\n")
966 self.ui.warn(_("%s not added: only files supported currently\n")
967 % f)
967 % f)
968 elif self.dirstate.state(f) in 'an':
968 elif self.dirstate.state(f) in 'an':
969 self.ui.warn(_("%s already tracked!\n") % f)
969 self.ui.warn(_("%s already tracked!\n") % f)
970 else:
970 else:
971 self.dirstate.update([f], "a")
971 self.dirstate.update([f], "a")
972
972
973 def forget(self, list, wlock=None):
973 def forget(self, list, wlock=None):
974 if not wlock:
974 if not wlock:
975 wlock = self.wlock()
975 wlock = self.wlock()
976 for f in list:
976 for f in list:
977 if self.dirstate.state(f) not in 'ai':
977 if self.dirstate.state(f) not in 'ai':
978 self.ui.warn(_("%s not added!\n") % f)
978 self.ui.warn(_("%s not added!\n") % f)
979 else:
979 else:
980 self.dirstate.forget([f])
980 self.dirstate.forget([f])
981
981
982 def remove(self, list, unlink=False, wlock=None):
982 def remove(self, list, unlink=False, wlock=None):
983 if unlink:
983 if unlink:
984 for f in list:
984 for f in list:
985 try:
985 try:
986 util.unlink(self.wjoin(f))
986 util.unlink(self.wjoin(f))
987 except OSError, inst:
987 except OSError, inst:
988 if inst.errno != errno.ENOENT:
988 if inst.errno != errno.ENOENT:
989 raise
989 raise
990 if not wlock:
990 if not wlock:
991 wlock = self.wlock()
991 wlock = self.wlock()
992 for f in list:
992 for f in list:
993 p = self.wjoin(f)
993 p = self.wjoin(f)
994 if os.path.exists(p):
994 if os.path.exists(p):
995 self.ui.warn(_("%s still exists!\n") % f)
995 self.ui.warn(_("%s still exists!\n") % f)
996 elif self.dirstate.state(f) == 'a':
996 elif self.dirstate.state(f) == 'a':
997 self.dirstate.forget([f])
997 self.dirstate.forget([f])
998 elif f not in self.dirstate:
998 elif f not in self.dirstate:
999 self.ui.warn(_("%s not tracked!\n") % f)
999 self.ui.warn(_("%s not tracked!\n") % f)
1000 else:
1000 else:
1001 self.dirstate.update([f], "r")
1001 self.dirstate.update([f], "r")
1002
1002
1003 def undelete(self, list, wlock=None):
1003 def undelete(self, list, wlock=None):
1004 p = self.dirstate.parents()[0]
1004 p = self.dirstate.parents()[0]
1005 mn = self.changelog.read(p)[0]
1005 mn = self.changelog.read(p)[0]
1006 m = self.manifest.read(mn)
1006 m = self.manifest.read(mn)
1007 if not wlock:
1007 if not wlock:
1008 wlock = self.wlock()
1008 wlock = self.wlock()
1009 for f in list:
1009 for f in list:
1010 if self.dirstate.state(f) not in "r":
1010 if self.dirstate.state(f) not in "r":
1011 self.ui.warn("%s not removed!\n" % f)
1011 self.ui.warn("%s not removed!\n" % f)
1012 else:
1012 else:
1013 t = self.file(f).read(m[f])
1013 t = self.file(f).read(m[f])
1014 self.wwrite(f, t)
1014 self.wwrite(f, t)
1015 util.set_exec(self.wjoin(f), m.execf(f))
1015 util.set_exec(self.wjoin(f), m.execf(f))
1016 self.dirstate.update([f], "n")
1016 self.dirstate.update([f], "n")
1017
1017
1018 def copy(self, source, dest, wlock=None):
1018 def copy(self, source, dest, wlock=None):
1019 p = self.wjoin(dest)
1019 p = self.wjoin(dest)
1020 if not os.path.exists(p):
1020 if not os.path.exists(p):
1021 self.ui.warn(_("%s does not exist!\n") % dest)
1021 self.ui.warn(_("%s does not exist!\n") % dest)
1022 elif not os.path.isfile(p):
1022 elif not os.path.isfile(p):
1023 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1023 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1024 else:
1024 else:
1025 if not wlock:
1025 if not wlock:
1026 wlock = self.wlock()
1026 wlock = self.wlock()
1027 if self.dirstate.state(dest) == '?':
1027 if self.dirstate.state(dest) == '?':
1028 self.dirstate.update([dest], "a")
1028 self.dirstate.update([dest], "a")
1029 self.dirstate.copy(source, dest)
1029 self.dirstate.copy(source, dest)
1030
1030
1031 def heads(self, start=None):
1031 def heads(self, start=None):
1032 heads = self.changelog.heads(start)
1032 heads = self.changelog.heads(start)
1033 # sort the output in rev descending order
1033 # sort the output in rev descending order
1034 heads = [(-self.changelog.rev(h), h) for h in heads]
1034 heads = [(-self.changelog.rev(h), h) for h in heads]
1035 heads.sort()
1035 heads.sort()
1036 return [n for (r, n) in heads]
1036 return [n for (r, n) in heads]
1037
1037
1038 # branchlookup returns a dict giving a list of branches for
1038 # branchlookup returns a dict giving a list of branches for
1039 # each head. A branch is defined as the tag of a node or
1039 # each head. A branch is defined as the tag of a node or
1040 # the branch of the node's parents. If a node has multiple
1040 # the branch of the node's parents. If a node has multiple
1041 # branch tags, tags are eliminated if they are visible from other
1041 # branch tags, tags are eliminated if they are visible from other
1042 # branch tags.
1042 # branch tags.
1043 #
1043 #
1044 # So, for this graph: a->b->c->d->e
1044 # So, for this graph: a->b->c->d->e
1045 # \ /
1045 # \ /
1046 # aa -----/
1046 # aa -----/
1047 # a has tag 2.6.12
1047 # a has tag 2.6.12
1048 # d has tag 2.6.13
1048 # d has tag 2.6.13
1049 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1049 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1050 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1050 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1051 # from the list.
1051 # from the list.
1052 #
1052 #
1053 # It is possible that more than one head will have the same branch tag.
1053 # It is possible that more than one head will have the same branch tag.
1054 # callers need to check the result for multiple heads under the same
1054 # callers need to check the result for multiple heads under the same
1055 # branch tag if that is a problem for them (ie checkout of a specific
1055 # branch tag if that is a problem for them (ie checkout of a specific
1056 # branch).
1056 # branch).
1057 #
1057 #
1058 # passing in a specific branch will limit the depth of the search
1058 # passing in a specific branch will limit the depth of the search
1059 # through the parents. It won't limit the branches returned in the
1059 # through the parents. It won't limit the branches returned in the
1060 # result though.
1060 # result though.
1061 def branchlookup(self, heads=None, branch=None):
1061 def branchlookup(self, heads=None, branch=None):
1062 if not heads:
1062 if not heads:
1063 heads = self.heads()
1063 heads = self.heads()
1064 headt = [ h for h in heads ]
1064 headt = [ h for h in heads ]
1065 chlog = self.changelog
1065 chlog = self.changelog
1066 branches = {}
1066 branches = {}
1067 merges = []
1067 merges = []
1068 seenmerge = {}
1068 seenmerge = {}
1069
1069
1070 # traverse the tree once for each head, recording in the branches
1070 # traverse the tree once for each head, recording in the branches
1071 # dict which tags are visible from this head. The branches
1071 # dict which tags are visible from this head. The branches
1072 # dict also records which tags are visible from each tag
1072 # dict also records which tags are visible from each tag
1073 # while we traverse.
1073 # while we traverse.
1074 while headt or merges:
1074 while headt or merges:
1075 if merges:
1075 if merges:
1076 n, found = merges.pop()
1076 n, found = merges.pop()
1077 visit = [n]
1077 visit = [n]
1078 else:
1078 else:
1079 h = headt.pop()
1079 h = headt.pop()
1080 visit = [h]
1080 visit = [h]
1081 found = [h]
1081 found = [h]
1082 seen = {}
1082 seen = {}
1083 while visit:
1083 while visit:
1084 n = visit.pop()
1084 n = visit.pop()
1085 if n in seen:
1085 if n in seen:
1086 continue
1086 continue
1087 pp = chlog.parents(n)
1087 pp = chlog.parents(n)
1088 tags = self.nodetags(n)
1088 tags = self.nodetags(n)
1089 if tags:
1089 if tags:
1090 for x in tags:
1090 for x in tags:
1091 if x == 'tip':
1091 if x == 'tip':
1092 continue
1092 continue
1093 for f in found:
1093 for f in found:
1094 branches.setdefault(f, {})[n] = 1
1094 branches.setdefault(f, {})[n] = 1
1095 branches.setdefault(n, {})[n] = 1
1095 branches.setdefault(n, {})[n] = 1
1096 break
1096 break
1097 if n not in found:
1097 if n not in found:
1098 found.append(n)
1098 found.append(n)
1099 if branch in tags:
1099 if branch in tags:
1100 continue
1100 continue
1101 seen[n] = 1
1101 seen[n] = 1
1102 if pp[1] != nullid and n not in seenmerge:
1102 if pp[1] != nullid and n not in seenmerge:
1103 merges.append((pp[1], [x for x in found]))
1103 merges.append((pp[1], [x for x in found]))
1104 seenmerge[n] = 1
1104 seenmerge[n] = 1
1105 if pp[0] != nullid:
1105 if pp[0] != nullid:
1106 visit.append(pp[0])
1106 visit.append(pp[0])
1107 # traverse the branches dict, eliminating branch tags from each
1107 # traverse the branches dict, eliminating branch tags from each
1108 # head that are visible from another branch tag for that head.
1108 # head that are visible from another branch tag for that head.
1109 out = {}
1109 out = {}
1110 viscache = {}
1110 viscache = {}
1111 for h in heads:
1111 for h in heads:
1112 def visible(node):
1112 def visible(node):
1113 if node in viscache:
1113 if node in viscache:
1114 return viscache[node]
1114 return viscache[node]
1115 ret = {}
1115 ret = {}
1116 visit = [node]
1116 visit = [node]
1117 while visit:
1117 while visit:
1118 x = visit.pop()
1118 x = visit.pop()
1119 if x in viscache:
1119 if x in viscache:
1120 ret.update(viscache[x])
1120 ret.update(viscache[x])
1121 elif x not in ret:
1121 elif x not in ret:
1122 ret[x] = 1
1122 ret[x] = 1
1123 if x in branches:
1123 if x in branches:
1124 visit[len(visit):] = branches[x].keys()
1124 visit[len(visit):] = branches[x].keys()
1125 viscache[node] = ret
1125 viscache[node] = ret
1126 return ret
1126 return ret
1127 if h not in branches:
1127 if h not in branches:
1128 continue
1128 continue
1129 # O(n^2), but somewhat limited. This only searches the
1129 # O(n^2), but somewhat limited. This only searches the
1130 # tags visible from a specific head, not all the tags in the
1130 # tags visible from a specific head, not all the tags in the
1131 # whole repo.
1131 # whole repo.
1132 for b in branches[h]:
1132 for b in branches[h]:
1133 vis = False
1133 vis = False
1134 for bb in branches[h].keys():
1134 for bb in branches[h].keys():
1135 if b != bb:
1135 if b != bb:
1136 if b in visible(bb):
1136 if b in visible(bb):
1137 vis = True
1137 vis = True
1138 break
1138 break
1139 if not vis:
1139 if not vis:
1140 l = out.setdefault(h, [])
1140 l = out.setdefault(h, [])
1141 l[len(l):] = self.nodetags(b)
1141 l[len(l):] = self.nodetags(b)
1142 return out
1142 return out
1143
1143
1144 def branches(self, nodes):
1144 def branches(self, nodes):
1145 if not nodes:
1145 if not nodes:
1146 nodes = [self.changelog.tip()]
1146 nodes = [self.changelog.tip()]
1147 b = []
1147 b = []
1148 for n in nodes:
1148 for n in nodes:
1149 t = n
1149 t = n
1150 while 1:
1150 while 1:
1151 p = self.changelog.parents(n)
1151 p = self.changelog.parents(n)
1152 if p[1] != nullid or p[0] == nullid:
1152 if p[1] != nullid or p[0] == nullid:
1153 b.append((t, n, p[0], p[1]))
1153 b.append((t, n, p[0], p[1]))
1154 break
1154 break
1155 n = p[0]
1155 n = p[0]
1156 return b
1156 return b
1157
1157
1158 def between(self, pairs):
1158 def between(self, pairs):
1159 r = []
1159 r = []
1160
1160
1161 for top, bottom in pairs:
1161 for top, bottom in pairs:
1162 n, l, i = top, [], 0
1162 n, l, i = top, [], 0
1163 f = 1
1163 f = 1
1164
1164
1165 while n != bottom:
1165 while n != bottom:
1166 p = self.changelog.parents(n)[0]
1166 p = self.changelog.parents(n)[0]
1167 if i == f:
1167 if i == f:
1168 l.append(n)
1168 l.append(n)
1169 f = f * 2
1169 f = f * 2
1170 n = p
1170 n = p
1171 i += 1
1171 i += 1
1172
1172
1173 r.append(l)
1173 r.append(l)
1174
1174
1175 return r
1175 return r
1176
1176
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # membership in m tells us whether the local changelog knows a node
        m = self.changelog.nodemap
        search = []      # (head, root) branch ranges to binary-search later
        fetch = {}       # roots of the missing set (used as a set)
        seen = {}        # branch heads already examined
        seenbranch = {}  # full branch tuples already scheduled for search
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything remote has is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req: nodes we have already asked (or will ask) the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # whole branch missing, both parents known:
                            # its root is an earliest-unknown node
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is an earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow the range and search it again
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short(f[:4]) hexifies a truncated node;
                # short(f) was likely intended — confirm before changing.
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # only the null revision in common: the repos share no history
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1316 return fetch.keys()
1317
1317
1318 def findoutgoing(self, remote, base=None, heads=None, force=False):
1318 def findoutgoing(self, remote, base=None, heads=None, force=False):
1319 """Return list of nodes that are roots of subsets not in remote
1319 """Return list of nodes that are roots of subsets not in remote
1320
1320
1321 If base dict is specified, assume that these nodes and their parents
1321 If base dict is specified, assume that these nodes and their parents
1322 exist on the remote side.
1322 exist on the remote side.
1323 If a list of heads is specified, return only nodes which are heads
1323 If a list of heads is specified, return only nodes which are heads
1324 or ancestors of these heads, and return a second element which
1324 or ancestors of these heads, and return a second element which
1325 contains all remote heads which get new children.
1325 contains all remote heads which get new children.
1326 """
1326 """
1327 if base == None:
1327 if base == None:
1328 base = {}
1328 base = {}
1329 self.findincoming(remote, base, heads, force=force)
1329 self.findincoming(remote, base, heads, force=force)
1330
1330
1331 self.ui.debug(_("common changesets up to ")
1331 self.ui.debug(_("common changesets up to ")
1332 + " ".join(map(short, base.keys())) + "\n")
1332 + " ".join(map(short, base.keys())) + "\n")
1333
1333
1334 remain = dict.fromkeys(self.changelog.nodemap)
1334 remain = dict.fromkeys(self.changelog.nodemap)
1335
1335
1336 # prune everything remote has from the tree
1336 # prune everything remote has from the tree
1337 del remain[nullid]
1337 del remain[nullid]
1338 remove = base.keys()
1338 remove = base.keys()
1339 while remove:
1339 while remove:
1340 n = remove.pop(0)
1340 n = remove.pop(0)
1341 if n in remain:
1341 if n in remain:
1342 del remain[n]
1342 del remain[n]
1343 for p in self.changelog.parents(n):
1343 for p in self.changelog.parents(n):
1344 remove.append(p)
1344 remove.append(p)
1345
1345
1346 # find every node whose parents have been pruned
1346 # find every node whose parents have been pruned
1347 subset = []
1347 subset = []
1348 # find every remote head that will get new children
1348 # find every remote head that will get new children
1349 updated_heads = {}
1349 updated_heads = {}
1350 for n in remain:
1350 for n in remain:
1351 p1, p2 = self.changelog.parents(n)
1351 p1, p2 = self.changelog.parents(n)
1352 if p1 not in remain and p2 not in remain:
1352 if p1 not in remain and p2 not in remain:
1353 subset.append(n)
1353 subset.append(n)
1354 if heads:
1354 if heads:
1355 if p1 in heads:
1355 if p1 in heads:
1356 updated_heads[p1] = True
1356 updated_heads[p1] = True
1357 if p2 in heads:
1357 if p2 in heads:
1358 updated_heads[p2] = True
1358 updated_heads[p2] = True
1359
1359
1360 # this is the set of all roots we have to push
1360 # this is the set of all roots we have to push
1361 if heads:
1361 if heads:
1362 return subset, updated_heads.keys()
1362 return subset, updated_heads.keys()
1363 else:
1363 else:
1364 return subset
1364 return subset
1365
1365
1366 def pull(self, remote, heads=None, force=False, lock=None):
1366 def pull(self, remote, heads=None, force=False, lock=None):
1367 mylock = False
1367 mylock = False
1368 if not lock:
1368 if not lock:
1369 lock = self.lock()
1369 lock = self.lock()
1370 mylock = True
1370 mylock = True
1371
1371
1372 try:
1372 try:
1373 fetch = self.findincoming(remote, force=force)
1373 fetch = self.findincoming(remote, force=force)
1374 if fetch == [nullid]:
1374 if fetch == [nullid]:
1375 self.ui.status(_("requesting all changes\n"))
1375 self.ui.status(_("requesting all changes\n"))
1376
1376
1377 if not fetch:
1377 if not fetch:
1378 self.ui.status(_("no changes found\n"))
1378 self.ui.status(_("no changes found\n"))
1379 return 0
1379 return 0
1380
1380
1381 if heads is None:
1381 if heads is None:
1382 cg = remote.changegroup(fetch, 'pull')
1382 cg = remote.changegroup(fetch, 'pull')
1383 else:
1383 else:
1384 if 'changegroupsubset' not in remote.capabilities:
1384 if 'changegroupsubset' not in remote.capabilities:
1385 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1385 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1386 cg = remote.changegroupsubset(fetch, heads, 'pull')
1386 cg = remote.changegroupsubset(fetch, heads, 'pull')
1387 return self.addchangegroup(cg, 'pull', remote.url())
1387 return self.addchangegroup(cg, 'pull', remote.url())
1388 finally:
1388 finally:
1389 if mylock:
1389 if mylock:
1390 lock.release()
1390 lock.release()
1391
1391
1392 def push(self, remote, force=False, revs=None):
1392 def push(self, remote, force=False, revs=None):
1393 # there are two ways to push to remote repo:
1393 # there are two ways to push to remote repo:
1394 #
1394 #
1395 # addchangegroup assumes local user can lock remote
1395 # addchangegroup assumes local user can lock remote
1396 # repo (local filesystem, old ssh servers).
1396 # repo (local filesystem, old ssh servers).
1397 #
1397 #
1398 # unbundle assumes local user cannot lock remote repo (new ssh
1398 # unbundle assumes local user cannot lock remote repo (new ssh
1399 # servers, http servers).
1399 # servers, http servers).
1400
1400
1401 if remote.capable('unbundle'):
1401 if remote.capable('unbundle'):
1402 return self.push_unbundle(remote, force, revs)
1402 return self.push_unbundle(remote, force, revs)
1403 return self.push_addchangegroup(remote, force, revs)
1403 return self.push_addchangegroup(remote, force, revs)
1404
1404
    def prepush(self, remote, force, revs):
        """Analyse what a push to remote would send.

        Returns (changegroup, remote heads) when there is something safe
        to push, or (None, 1) when there is nothing to push or the push
        would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # inc: truthy when the remote has changes we do not have locally
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the given revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never add extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head set
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head survives unless some outgoing head
                        # descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown locally: remains a head after push
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push with unseen remote changes: warn but proceed
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1460
1460
1461 def push_addchangegroup(self, remote, force, revs):
1461 def push_addchangegroup(self, remote, force, revs):
1462 lock = remote.lock()
1462 lock = remote.lock()
1463
1463
1464 ret = self.prepush(remote, force, revs)
1464 ret = self.prepush(remote, force, revs)
1465 if ret[0] is not None:
1465 if ret[0] is not None:
1466 cg, remote_heads = ret
1466 cg, remote_heads = ret
1467 return remote.addchangegroup(cg, 'push', self.url())
1467 return remote.addchangegroup(cg, 'push', self.url())
1468 return ret[1]
1468 return ret[1]
1469
1469
1470 def push_unbundle(self, remote, force, revs):
1470 def push_unbundle(self, remote, force, revs):
1471 # local repo finds heads on server, finds out what revs it
1471 # local repo finds heads on server, finds out what revs it
1472 # must push. once revs transferred, if server finds it has
1472 # must push. once revs transferred, if server finds it has
1473 # different heads (someone else won commit/push race), server
1473 # different heads (someone else won commit/push race), server
1474 # aborts.
1474 # aborts.
1475
1475
1476 ret = self.prepush(remote, force, revs)
1476 ret = self.prepush(remote, force, revs)
1477 if ret[0] is not None:
1477 if ret[0] is not None:
1478 cg, remote_heads = ret
1478 cg, remote_heads = ret
1479 if force: remote_heads = ['force']
1479 if force: remote_heads = ['force']
1480 return remote.unbundle(cg, remote_heads, 'push')
1480 return remote.unbundle(cg, remote_heads, 'push')
1481 return ret[1]
1481 return ret[1]
1482
1482
1483 def changegroupinfo(self, nodes):
1483 def changegroupinfo(self, nodes):
1484 self.ui.note(_("%d changesets found\n") % len(nodes))
1484 self.ui.note(_("%d changesets found\n") % len(nodes))
1485 if self.ui.debugflag:
1485 if self.ui.debugflag:
1486 self.ui.debug(_("List of changesets:\n"))
1486 self.ui.debug(_("List of changesets:\n"))
1487 for node in nodes:
1487 for node in nodes:
1488 self.ui.debug("%s\n" % hex(node))
1488 self.ui.debug("%s\n" % hex(node))
1489
1489
1490 def changegroupsubset(self, bases, heads, source):
1490 def changegroupsubset(self, bases, heads, source):
1491 """This function generates a changegroup consisting of all the nodes
1491 """This function generates a changegroup consisting of all the nodes
1492 that are descendents of any of the bases, and ancestors of any of
1492 that are descendents of any of the bases, and ancestors of any of
1493 the heads.
1493 the heads.
1494
1494
1495 It is fairly complex as determining which filenodes and which
1495 It is fairly complex as determining which filenodes and which
1496 manifest nodes need to be included for the changeset to be complete
1496 manifest nodes need to be included for the changeset to be complete
1497 is non-trivial.
1497 is non-trivial.
1498
1498
1499 Another wrinkle is doing the reverse, figuring out which changeset in
1499 Another wrinkle is doing the reverse, figuring out which changeset in
1500 the changegroup a particular filenode or manifestnode belongs to."""
1500 the changegroup a particular filenode or manifestnode belongs to."""
1501
1501
1502 self.hook('preoutgoing', throw=True, source=source)
1502 self.hook('preoutgoing', throw=True, source=source)
1503
1503
1504 # Set up some initial variables
1504 # Set up some initial variables
1505 # Make it easy to refer to self.changelog
1505 # Make it easy to refer to self.changelog
1506 cl = self.changelog
1506 cl = self.changelog
1507 # msng is short for missing - compute the list of changesets in this
1507 # msng is short for missing - compute the list of changesets in this
1508 # changegroup.
1508 # changegroup.
1509 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1509 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1510 self.changegroupinfo(msng_cl_lst)
1510 self.changegroupinfo(msng_cl_lst)
1511 # Some bases may turn out to be superfluous, and some heads may be
1511 # Some bases may turn out to be superfluous, and some heads may be
1512 # too. nodesbetween will return the minimal set of bases and heads
1512 # too. nodesbetween will return the minimal set of bases and heads
1513 # necessary to re-create the changegroup.
1513 # necessary to re-create the changegroup.
1514
1514
1515 # Known heads are the list of heads that it is assumed the recipient
1515 # Known heads are the list of heads that it is assumed the recipient
1516 # of this changegroup will know about.
1516 # of this changegroup will know about.
1517 knownheads = {}
1517 knownheads = {}
1518 # We assume that all parents of bases are known heads.
1518 # We assume that all parents of bases are known heads.
1519 for n in bases:
1519 for n in bases:
1520 for p in cl.parents(n):
1520 for p in cl.parents(n):
1521 if p != nullid:
1521 if p != nullid:
1522 knownheads[p] = 1
1522 knownheads[p] = 1
1523 knownheads = knownheads.keys()
1523 knownheads = knownheads.keys()
1524 if knownheads:
1524 if knownheads:
1525 # Now that we know what heads are known, we can compute which
1525 # Now that we know what heads are known, we can compute which
1526 # changesets are known. The recipient must know about all
1526 # changesets are known. The recipient must know about all
1527 # changesets required to reach the known heads from the null
1527 # changesets required to reach the known heads from the null
1528 # changeset.
1528 # changeset.
1529 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1529 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1530 junk = None
1530 junk = None
1531 # Transform the list into an ersatz set.
1531 # Transform the list into an ersatz set.
1532 has_cl_set = dict.fromkeys(has_cl_set)
1532 has_cl_set = dict.fromkeys(has_cl_set)
1533 else:
1533 else:
1534 # If there were no known heads, the recipient cannot be assumed to
1534 # If there were no known heads, the recipient cannot be assumed to
1535 # know about any changesets.
1535 # know about any changesets.
1536 has_cl_set = {}
1536 has_cl_set = {}
1537
1537
1538 # Make it easy to refer to self.manifest
1538 # Make it easy to refer to self.manifest
1539 mnfst = self.manifest
1539 mnfst = self.manifest
1540 # We don't know which manifests are missing yet
1540 # We don't know which manifests are missing yet
1541 msng_mnfst_set = {}
1541 msng_mnfst_set = {}
1542 # Nor do we know which filenodes are missing.
1542 # Nor do we know which filenodes are missing.
1543 msng_filenode_set = {}
1543 msng_filenode_set = {}
1544
1544
1545 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1545 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1546 junk = None
1546 junk = None
1547
1547
1548 # A changeset always belongs to itself, so the changenode lookup
1548 # A changeset always belongs to itself, so the changenode lookup
1549 # function for a changenode is identity.
1549 # function for a changenode is identity.
1550 def identity(x):
1550 def identity(x):
1551 return x
1551 return x
1552
1552
1553 # A function generating function. Sets up an environment for the
1553 # A function generating function. Sets up an environment for the
1554 # inner function.
1554 # inner function.
1555 def cmp_by_rev_func(revlog):
1555 def cmp_by_rev_func(revlog):
1556 # Compare two nodes by their revision number in the environment's
1556 # Compare two nodes by their revision number in the environment's
1557 # revision history. Since the revision number both represents the
1557 # revision history. Since the revision number both represents the
1558 # most efficient order to read the nodes in, and represents a
1558 # most efficient order to read the nodes in, and represents a
1559 # topological sorting of the nodes, this function is often useful.
1559 # topological sorting of the nodes, this function is often useful.
1560 def cmp_by_rev(a, b):
1560 def cmp_by_rev(a, b):
1561 return cmp(revlog.rev(a), revlog.rev(b))
1561 return cmp(revlog.rev(a), revlog.rev(b))
1562 return cmp_by_rev
1562 return cmp_by_rev
1563
1563
1564 # If we determine that a particular file or manifest node must be a
1564 # If we determine that a particular file or manifest node must be a
1565 # node that the recipient of the changegroup will already have, we can
1565 # node that the recipient of the changegroup will already have, we can
1566 # also assume the recipient will have all the parents. This function
1566 # also assume the recipient will have all the parents. This function
1567 # prunes them from the set of missing nodes.
1567 # prunes them from the set of missing nodes.
1568 def prune_parents(revlog, hasset, msngset):
1568 def prune_parents(revlog, hasset, msngset):
1569 haslst = hasset.keys()
1569 haslst = hasset.keys()
1570 haslst.sort(cmp_by_rev_func(revlog))
1570 haslst.sort(cmp_by_rev_func(revlog))
1571 for node in haslst:
1571 for node in haslst:
1572 parentlst = [p for p in revlog.parents(node) if p != nullid]
1572 parentlst = [p for p in revlog.parents(node) if p != nullid]
1573 while parentlst:
1573 while parentlst:
1574 n = parentlst.pop()
1574 n = parentlst.pop()
1575 if n not in hasset:
1575 if n not in hasset:
1576 hasset[n] = 1
1576 hasset[n] = 1
1577 p = [p for p in revlog.parents(n) if p != nullid]
1577 p = [p for p in revlog.parents(n) if p != nullid]
1578 parentlst.extend(p)
1578 parentlst.extend(p)
1579 for n in hasset:
1579 for n in hasset:
1580 msngset.pop(n, None)
1580 msngset.pop(n, None)
1581
1581
1582 # This is a function generating function used to set up an environment
1582 # This is a function generating function used to set up an environment
1583 # for the inner function to execute in.
1583 # for the inner function to execute in.
1584 def manifest_and_file_collector(changedfileset):
1584 def manifest_and_file_collector(changedfileset):
1585 # This is an information gathering function that gathers
1585 # This is an information gathering function that gathers
1586 # information from each changeset node that goes out as part of
1586 # information from each changeset node that goes out as part of
1587 # the changegroup. The information gathered is a list of which
1587 # the changegroup. The information gathered is a list of which
1588 # manifest nodes are potentially required (the recipient may
1588 # manifest nodes are potentially required (the recipient may
1589 # already have them) and total list of all files which were
1589 # already have them) and total list of all files which were
1590 # changed in any changeset in the changegroup.
1590 # changed in any changeset in the changegroup.
1591 #
1591 #
1592 # We also remember the first changenode we saw any manifest
1592 # We also remember the first changenode we saw any manifest
1593 # referenced by so we can later determine which changenode 'owns'
1593 # referenced by so we can later determine which changenode 'owns'
1594 # the manifest.
1594 # the manifest.
1595 def collect_manifests_and_files(clnode):
1595 def collect_manifests_and_files(clnode):
1596 c = cl.read(clnode)
1596 c = cl.read(clnode)
1597 for f in c[3]:
1597 for f in c[3]:
1598 # This is to make sure we only have one instance of each
1598 # This is to make sure we only have one instance of each
1599 # filename string for each filename.
1599 # filename string for each filename.
1600 changedfileset.setdefault(f, f)
1600 changedfileset.setdefault(f, f)
1601 msng_mnfst_set.setdefault(c[0], clnode)
1601 msng_mnfst_set.setdefault(c[0], clnode)
1602 return collect_manifests_and_files
1602 return collect_manifests_and_files
1603
1603
1604 # Figure out which manifest nodes (of the ones we think might be part
1604 # Figure out which manifest nodes (of the ones we think might be part
1605 # of the changegroup) the recipient must know about and remove them
1605 # of the changegroup) the recipient must know about and remove them
1606 # from the changegroup.
1606 # from the changegroup.
1607 def prune_manifests():
1607 def prune_manifests():
1608 has_mnfst_set = {}
1608 has_mnfst_set = {}
1609 for n in msng_mnfst_set:
1609 for n in msng_mnfst_set:
1610 # If a 'missing' manifest thinks it belongs to a changenode
1610 # If a 'missing' manifest thinks it belongs to a changenode
1611 # the recipient is assumed to have, obviously the recipient
1611 # the recipient is assumed to have, obviously the recipient
1612 # must have that manifest.
1612 # must have that manifest.
1613 linknode = cl.node(mnfst.linkrev(n))
1613 linknode = cl.node(mnfst.linkrev(n))
1614 if linknode in has_cl_set:
1614 if linknode in has_cl_set:
1615 has_mnfst_set[n] = 1
1615 has_mnfst_set[n] = 1
1616 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1616 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1617
1617
1618 # Use the information collected in collect_manifests_and_files to say
1618 # Use the information collected in collect_manifests_and_files to say
1619 # which changenode any manifestnode belongs to.
1619 # which changenode any manifestnode belongs to.
1620 def lookup_manifest_link(mnfstnode):
1620 def lookup_manifest_link(mnfstnode):
1621 return msng_mnfst_set[mnfstnode]
1621 return msng_mnfst_set[mnfstnode]
1622
1622
1623 # A function generating function that sets up the initial environment
1623 # A function generating function that sets up the initial environment
1624 # the inner function.
1624 # the inner function.
1625 def filenode_collector(changedfiles):
1625 def filenode_collector(changedfiles):
1626 next_rev = [0]
1626 next_rev = [0]
1627 # This gathers information from each manifestnode included in the
1627 # This gathers information from each manifestnode included in the
1628 # changegroup about which filenodes the manifest node references
1628 # changegroup about which filenodes the manifest node references
1629 # so we can include those in the changegroup too.
1629 # so we can include those in the changegroup too.
1630 #
1630 #
1631 # It also remembers which changenode each filenode belongs to. It
1631 # It also remembers which changenode each filenode belongs to. It
1632 # does this by assuming the a filenode belongs to the changenode
1632 # does this by assuming the a filenode belongs to the changenode
1633 # the first manifest that references it belongs to.
1633 # the first manifest that references it belongs to.
1634 def collect_msng_filenodes(mnfstnode):
1634 def collect_msng_filenodes(mnfstnode):
1635 r = mnfst.rev(mnfstnode)
1635 r = mnfst.rev(mnfstnode)
1636 if r == next_rev[0]:
1636 if r == next_rev[0]:
1637 # If the last rev we looked at was the one just previous,
1637 # If the last rev we looked at was the one just previous,
1638 # we only need to see a diff.
1638 # we only need to see a diff.
1639 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1639 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1640 # For each line in the delta
1640 # For each line in the delta
1641 for dline in delta.splitlines():
1641 for dline in delta.splitlines():
1642 # get the filename and filenode for that line
1642 # get the filename and filenode for that line
1643 f, fnode = dline.split('\0')
1643 f, fnode = dline.split('\0')
1644 fnode = bin(fnode[:40])
1644 fnode = bin(fnode[:40])
1645 f = changedfiles.get(f, None)
1645 f = changedfiles.get(f, None)
1646 # And if the file is in the list of files we care
1646 # And if the file is in the list of files we care
1647 # about.
1647 # about.
1648 if f is not None:
1648 if f is not None:
1649 # Get the changenode this manifest belongs to
1649 # Get the changenode this manifest belongs to
1650 clnode = msng_mnfst_set[mnfstnode]
1650 clnode = msng_mnfst_set[mnfstnode]
1651 # Create the set of filenodes for the file if
1651 # Create the set of filenodes for the file if
1652 # there isn't one already.
1652 # there isn't one already.
1653 ndset = msng_filenode_set.setdefault(f, {})
1653 ndset = msng_filenode_set.setdefault(f, {})
1654 # And set the filenode's changelog node to the
1654 # And set the filenode's changelog node to the
1655 # manifest's if it hasn't been set already.
1655 # manifest's if it hasn't been set already.
1656 ndset.setdefault(fnode, clnode)
1656 ndset.setdefault(fnode, clnode)
1657 else:
1657 else:
1658 # Otherwise we need a full manifest.
1658 # Otherwise we need a full manifest.
1659 m = mnfst.read(mnfstnode)
1659 m = mnfst.read(mnfstnode)
1660 # For every file in we care about.
1660 # For every file in we care about.
1661 for f in changedfiles:
1661 for f in changedfiles:
1662 fnode = m.get(f, None)
1662 fnode = m.get(f, None)
1663 # If it's in the manifest
1663 # If it's in the manifest
1664 if fnode is not None:
1664 if fnode is not None:
1665 # See comments above.
1665 # See comments above.
1666 clnode = msng_mnfst_set[mnfstnode]
1666 clnode = msng_mnfst_set[mnfstnode]
1667 ndset = msng_filenode_set.setdefault(f, {})
1667 ndset = msng_filenode_set.setdefault(f, {})
1668 ndset.setdefault(fnode, clnode)
1668 ndset.setdefault(fnode, clnode)
1669 # Remember the revision we hope to see next.
1669 # Remember the revision we hope to see next.
1670 next_rev[0] = r + 1
1670 next_rev[0] = r + 1
1671 return collect_msng_filenodes
1671 return collect_msng_filenodes
1672
1672
1673 # We have a list of filenodes we think we need for a file, lets remove
1673 # We have a list of filenodes we think we need for a file, lets remove
1674 # all those we now the recipient must have.
1674 # all those we now the recipient must have.
1675 def prune_filenodes(f, filerevlog):
1675 def prune_filenodes(f, filerevlog):
1676 msngset = msng_filenode_set[f]
1676 msngset = msng_filenode_set[f]
1677 hasset = {}
1677 hasset = {}
1678 # If a 'missing' filenode thinks it belongs to a changenode we
1678 # If a 'missing' filenode thinks it belongs to a changenode we
1679 # assume the recipient must have, then the recipient must have
1679 # assume the recipient must have, then the recipient must have
1680 # that filenode.
1680 # that filenode.
1681 for n in msngset:
1681 for n in msngset:
1682 clnode = cl.node(filerevlog.linkrev(n))
1682 clnode = cl.node(filerevlog.linkrev(n))
1683 if clnode in has_cl_set:
1683 if clnode in has_cl_set:
1684 hasset[n] = 1
1684 hasset[n] = 1
1685 prune_parents(filerevlog, hasset, msngset)
1685 prune_parents(filerevlog, hasset, msngset)
1686
1686
1687 # A function generator function that sets up the a context for the
1687 # A function generator function that sets up the a context for the
1688 # inner function.
1688 # inner function.
1689 def lookup_filenode_link_func(fname):
1689 def lookup_filenode_link_func(fname):
1690 msngset = msng_filenode_set[fname]
1690 msngset = msng_filenode_set[fname]
1691 # Lookup the changenode the filenode belongs to.
1691 # Lookup the changenode the filenode belongs to.
1692 def lookup_filenode_link(fnode):
1692 def lookup_filenode_link(fnode):
1693 return msngset[fnode]
1693 return msngset[fnode]
1694 return lookup_filenode_link
1694 return lookup_filenode_link
1695
1695
1696 # Now that we have all theses utility functions to help out and
1696 # Now that we have all theses utility functions to help out and
1697 # logically divide up the task, generate the group.
1697 # logically divide up the task, generate the group.
1698 def gengroup():
1698 def gengroup():
1699 # The set of changed files starts empty.
1699 # The set of changed files starts empty.
1700 changedfiles = {}
1700 changedfiles = {}
1701 # Create a changenode group generator that will call our functions
1701 # Create a changenode group generator that will call our functions
1702 # back to lookup the owning changenode and collect information.
1702 # back to lookup the owning changenode and collect information.
1703 group = cl.group(msng_cl_lst, identity,
1703 group = cl.group(msng_cl_lst, identity,
1704 manifest_and_file_collector(changedfiles))
1704 manifest_and_file_collector(changedfiles))
1705 for chnk in group:
1705 for chnk in group:
1706 yield chnk
1706 yield chnk
1707
1707
1708 # The list of manifests has been collected by the generator
1708 # The list of manifests has been collected by the generator
1709 # calling our functions back.
1709 # calling our functions back.
1710 prune_manifests()
1710 prune_manifests()
1711 msng_mnfst_lst = msng_mnfst_set.keys()
1711 msng_mnfst_lst = msng_mnfst_set.keys()
1712 # Sort the manifestnodes by revision number.
1712 # Sort the manifestnodes by revision number.
1713 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1713 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1714 # Create a generator for the manifestnodes that calls our lookup
1714 # Create a generator for the manifestnodes that calls our lookup
1715 # and data collection functions back.
1715 # and data collection functions back.
1716 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1716 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1717 filenode_collector(changedfiles))
1717 filenode_collector(changedfiles))
1718 for chnk in group:
1718 for chnk in group:
1719 yield chnk
1719 yield chnk
1720
1720
1721 # These are no longer needed, dereference and toss the memory for
1721 # These are no longer needed, dereference and toss the memory for
1722 # them.
1722 # them.
1723 msng_mnfst_lst = None
1723 msng_mnfst_lst = None
1724 msng_mnfst_set.clear()
1724 msng_mnfst_set.clear()
1725
1725
1726 changedfiles = changedfiles.keys()
1726 changedfiles = changedfiles.keys()
1727 changedfiles.sort()
1727 changedfiles.sort()
1728 # Go through all our files in order sorted by name.
1728 # Go through all our files in order sorted by name.
1729 for fname in changedfiles:
1729 for fname in changedfiles:
1730 filerevlog = self.file(fname)
1730 filerevlog = self.file(fname)
1731 # Toss out the filenodes that the recipient isn't really
1731 # Toss out the filenodes that the recipient isn't really
1732 # missing.
1732 # missing.
1733 if msng_filenode_set.has_key(fname):
1733 if msng_filenode_set.has_key(fname):
1734 prune_filenodes(fname, filerevlog)
1734 prune_filenodes(fname, filerevlog)
1735 msng_filenode_lst = msng_filenode_set[fname].keys()
1735 msng_filenode_lst = msng_filenode_set[fname].keys()
1736 else:
1736 else:
1737 msng_filenode_lst = []
1737 msng_filenode_lst = []
1738 # If any filenodes are left, generate the group for them,
1738 # If any filenodes are left, generate the group for them,
1739 # otherwise don't bother.
1739 # otherwise don't bother.
1740 if len(msng_filenode_lst) > 0:
1740 if len(msng_filenode_lst) > 0:
1741 yield changegroup.genchunk(fname)
1741 yield changegroup.genchunk(fname)
1742 # Sort the filenodes by their revision #
1742 # Sort the filenodes by their revision #
1743 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1743 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1744 # Create a group generator and only pass in a changenode
1744 # Create a group generator and only pass in a changenode
1745 # lookup function as we need to collect no information
1745 # lookup function as we need to collect no information
1746 # from filenodes.
1746 # from filenodes.
1747 group = filerevlog.group(msng_filenode_lst,
1747 group = filerevlog.group(msng_filenode_lst,
1748 lookup_filenode_link_func(fname))
1748 lookup_filenode_link_func(fname))
1749 for chnk in group:
1749 for chnk in group:
1750 yield chnk
1750 yield chnk
1751 if msng_filenode_set.has_key(fname):
1751 if msng_filenode_set.has_key(fname):
1752 # Don't need this anymore, toss it to free memory.
1752 # Don't need this anymore, toss it to free memory.
1753 del msng_filenode_set[fname]
1753 del msng_filenode_set[fname]
1754 # Signal that no more groups are left.
1754 # Signal that no more groups are left.
1755 yield changegroup.closechunk()
1755 yield changegroup.closechunk()
1756
1756
1757 if msng_cl_lst:
1757 if msng_cl_lst:
1758 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1758 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1759
1759
1760 return util.chunkbuffer(gengroup())
1760 return util.chunkbuffer(gengroup())
1761
1761
1762 def changegroup(self, basenodes, source):
1762 def changegroup(self, basenodes, source):
1763 """Generate a changegroup of all nodes that we have that a recipient
1763 """Generate a changegroup of all nodes that we have that a recipient
1764 doesn't.
1764 doesn't.
1765
1765
1766 This is much easier than the previous function as we can assume that
1766 This is much easier than the previous function as we can assume that
1767 the recipient has any changenode we aren't sending them."""
1767 the recipient has any changenode we aren't sending them."""
1768
1768
1769 self.hook('preoutgoing', throw=True, source=source)
1769 self.hook('preoutgoing', throw=True, source=source)
1770
1770
1771 cl = self.changelog
1771 cl = self.changelog
1772 nodes = cl.nodesbetween(basenodes, None)[0]
1772 nodes = cl.nodesbetween(basenodes, None)[0]
1773 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1773 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1774 self.changegroupinfo(nodes)
1774 self.changegroupinfo(nodes)
1775
1775
1776 def identity(x):
1776 def identity(x):
1777 return x
1777 return x
1778
1778
1779 def gennodelst(revlog):
1779 def gennodelst(revlog):
1780 for r in xrange(0, revlog.count()):
1780 for r in xrange(0, revlog.count()):
1781 n = revlog.node(r)
1781 n = revlog.node(r)
1782 if revlog.linkrev(n) in revset:
1782 if revlog.linkrev(n) in revset:
1783 yield n
1783 yield n
1784
1784
1785 def changed_file_collector(changedfileset):
1785 def changed_file_collector(changedfileset):
1786 def collect_changed_files(clnode):
1786 def collect_changed_files(clnode):
1787 c = cl.read(clnode)
1787 c = cl.read(clnode)
1788 for fname in c[3]:
1788 for fname in c[3]:
1789 changedfileset[fname] = 1
1789 changedfileset[fname] = 1
1790 return collect_changed_files
1790 return collect_changed_files
1791
1791
1792 def lookuprevlink_func(revlog):
1792 def lookuprevlink_func(revlog):
1793 def lookuprevlink(n):
1793 def lookuprevlink(n):
1794 return cl.node(revlog.linkrev(n))
1794 return cl.node(revlog.linkrev(n))
1795 return lookuprevlink
1795 return lookuprevlink
1796
1796
1797 def gengroup():
1797 def gengroup():
1798 # construct a list of all changed files
1798 # construct a list of all changed files
1799 changedfiles = {}
1799 changedfiles = {}
1800
1800
1801 for chnk in cl.group(nodes, identity,
1801 for chnk in cl.group(nodes, identity,
1802 changed_file_collector(changedfiles)):
1802 changed_file_collector(changedfiles)):
1803 yield chnk
1803 yield chnk
1804 changedfiles = changedfiles.keys()
1804 changedfiles = changedfiles.keys()
1805 changedfiles.sort()
1805 changedfiles.sort()
1806
1806
1807 mnfst = self.manifest
1807 mnfst = self.manifest
1808 nodeiter = gennodelst(mnfst)
1808 nodeiter = gennodelst(mnfst)
1809 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1809 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1810 yield chnk
1810 yield chnk
1811
1811
1812 for fname in changedfiles:
1812 for fname in changedfiles:
1813 filerevlog = self.file(fname)
1813 filerevlog = self.file(fname)
1814 nodeiter = gennodelst(filerevlog)
1814 nodeiter = gennodelst(filerevlog)
1815 nodeiter = list(nodeiter)
1815 nodeiter = list(nodeiter)
1816 if nodeiter:
1816 if nodeiter:
1817 yield changegroup.genchunk(fname)
1817 yield changegroup.genchunk(fname)
1818 lookup = lookuprevlink_func(filerevlog)
1818 lookup = lookuprevlink_func(filerevlog)
1819 for chnk in filerevlog.group(nodeiter, lookup):
1819 for chnk in filerevlog.group(nodeiter, lookup):
1820 yield chnk
1820 yield chnk
1821
1821
1822 yield changegroup.closechunk()
1822 yield changegroup.closechunk()
1823
1823
1824 if nodes:
1824 if nodes:
1825 self.hook('outgoing', node=hex(nodes[0]), source=source)
1825 self.hook('outgoing', node=hex(nodes[0]), source=source)
1826
1826
1827 return util.chunkbuffer(gengroup())
1827 return util.chunkbuffer(gengroup())
1828
1828
1829 def addchangegroup(self, source, srctype, url):
1829 def addchangegroup(self, source, srctype, url):
1830 """add changegroup to repo.
1830 """add changegroup to repo.
1831
1831
1832 return values:
1832 return values:
1833 - nothing changed or no source: 0
1833 - nothing changed or no source: 0
1834 - more heads than before: 1+added heads (2..n)
1834 - more heads than before: 1+added heads (2..n)
1835 - less heads than before: -1-removed heads (-2..-n)
1835 - less heads than before: -1-removed heads (-2..-n)
1836 - number of heads stays the same: 1
1836 - number of heads stays the same: 1
1837 """
1837 """
1838 def csmap(x):
1838 def csmap(x):
1839 self.ui.debug(_("add changeset %s\n") % short(x))
1839 self.ui.debug(_("add changeset %s\n") % short(x))
1840 return cl.count()
1840 return cl.count()
1841
1841
1842 def revmap(x):
1842 def revmap(x):
1843 return cl.rev(x)
1843 return cl.rev(x)
1844
1844
1845 if not source:
1845 if not source:
1846 return 0
1846 return 0
1847
1847
1848 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1848 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1849
1849
1850 changesets = files = revisions = 0
1850 changesets = files = revisions = 0
1851
1851
1852 tr = self.transaction()
1852 tr = self.transaction()
1853
1853
1854 # write changelog data to temp files so concurrent readers will not see
1854 # write changelog data to temp files so concurrent readers will not see
1855 # inconsistent view
1855 # inconsistent view
1856 cl = None
1856 cl = None
1857 try:
1857 try:
1858 cl = appendfile.appendchangelog(self.sopener,
1858 cl = appendfile.appendchangelog(self.sopener,
1859 self.changelog.version)
1859 self.changelog.version)
1860
1860
1861 oldheads = len(cl.heads())
1861 oldheads = len(cl.heads())
1862
1862
1863 # pull off the changeset group
1863 # pull off the changeset group
1864 self.ui.status(_("adding changesets\n"))
1864 self.ui.status(_("adding changesets\n"))
1865 cor = cl.count() - 1
1865 cor = cl.count() - 1
1866 chunkiter = changegroup.chunkiter(source)
1866 chunkiter = changegroup.chunkiter(source)
1867 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1867 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1868 raise util.Abort(_("received changelog group is empty"))
1868 raise util.Abort(_("received changelog group is empty"))
1869 cnr = cl.count() - 1
1869 cnr = cl.count() - 1
1870 changesets = cnr - cor
1870 changesets = cnr - cor
1871
1871
1872 # pull off the manifest group
1872 # pull off the manifest group
1873 self.ui.status(_("adding manifests\n"))
1873 self.ui.status(_("adding manifests\n"))
1874 chunkiter = changegroup.chunkiter(source)
1874 chunkiter = changegroup.chunkiter(source)
1875 # no need to check for empty manifest group here:
1875 # no need to check for empty manifest group here:
1876 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1876 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1877 # no new manifest will be created and the manifest group will
1877 # no new manifest will be created and the manifest group will
1878 # be empty during the pull
1878 # be empty during the pull
1879 self.manifest.addgroup(chunkiter, revmap, tr)
1879 self.manifest.addgroup(chunkiter, revmap, tr)
1880
1880
1881 # process the files
1881 # process the files
1882 self.ui.status(_("adding file changes\n"))
1882 self.ui.status(_("adding file changes\n"))
1883 while 1:
1883 while 1:
1884 f = changegroup.getchunk(source)
1884 f = changegroup.getchunk(source)
1885 if not f:
1885 if not f:
1886 break
1886 break
1887 self.ui.debug(_("adding %s revisions\n") % f)
1887 self.ui.debug(_("adding %s revisions\n") % f)
1888 fl = self.file(f)
1888 fl = self.file(f)
1889 o = fl.count()
1889 o = fl.count()
1890 chunkiter = changegroup.chunkiter(source)
1890 chunkiter = changegroup.chunkiter(source)
1891 if fl.addgroup(chunkiter, revmap, tr) is None:
1891 if fl.addgroup(chunkiter, revmap, tr) is None:
1892 raise util.Abort(_("received file revlog group is empty"))
1892 raise util.Abort(_("received file revlog group is empty"))
1893 revisions += fl.count() - o
1893 revisions += fl.count() - o
1894 files += 1
1894 files += 1
1895
1895
1896 cl.writedata()
1896 cl.writedata()
1897 finally:
1897 finally:
1898 if cl:
1898 if cl:
1899 cl.cleanup()
1899 cl.cleanup()
1900
1900
1901 # make changelog see real files again
1901 # make changelog see real files again
1902 self.changelog = changelog.changelog(self.sopener,
1902 self.changelog = changelog.changelog(self.sopener,
1903 self.changelog.version)
1903 self.changelog.version)
1904 self.changelog.checkinlinesize(tr)
1904 self.changelog.checkinlinesize(tr)
1905
1905
1906 newheads = len(self.changelog.heads())
1906 newheads = len(self.changelog.heads())
1907 heads = ""
1907 heads = ""
1908 if oldheads and newheads != oldheads:
1908 if oldheads and newheads != oldheads:
1909 heads = _(" (%+d heads)") % (newheads - oldheads)
1909 heads = _(" (%+d heads)") % (newheads - oldheads)
1910
1910
1911 self.ui.status(_("added %d changesets"
1911 self.ui.status(_("added %d changesets"
1912 " with %d changes to %d files%s\n")
1912 " with %d changes to %d files%s\n")
1913 % (changesets, revisions, files, heads))
1913 % (changesets, revisions, files, heads))
1914
1914
1915 if changesets > 0:
1915 if changesets > 0:
1916 self.hook('pretxnchangegroup', throw=True,
1916 self.hook('pretxnchangegroup', throw=True,
1917 node=hex(self.changelog.node(cor+1)), source=srctype,
1917 node=hex(self.changelog.node(cor+1)), source=srctype,
1918 url=url)
1918 url=url)
1919
1919
1920 tr.close()
1920 tr.close()
1921
1921
1922 if changesets > 0:
1922 if changesets > 0:
1923 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1923 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1924 source=srctype, url=url)
1924 source=srctype, url=url)
1925
1925
1926 for i in xrange(cor + 1, cnr + 1):
1926 for i in xrange(cor + 1, cnr + 1):
1927 self.hook("incoming", node=hex(self.changelog.node(i)),
1927 self.hook("incoming", node=hex(self.changelog.node(i)),
1928 source=srctype, url=url)
1928 source=srctype, url=url)
1929
1929
1930 # never return 0 here:
1930 # never return 0 here:
1931 if newheads < oldheads:
1931 if newheads < oldheads:
1932 return newheads - oldheads - 1
1932 return newheads - oldheads - 1
1933 else:
1933 else:
1934 return newheads - oldheads + 1
1934 return newheads - oldheads + 1
1935
1935
1936
1936
1937 def stream_in(self, remote):
1937 def stream_in(self, remote):
1938 fp = remote.stream_out()
1938 fp = remote.stream_out()
1939 l = fp.readline()
1939 l = fp.readline()
1940 try:
1940 try:
1941 resp = int(l)
1941 resp = int(l)
1942 except ValueError:
1942 except ValueError:
1943 raise util.UnexpectedOutput(
1943 raise util.UnexpectedOutput(
1944 _('Unexpected response from remote server:'), l)
1944 _('Unexpected response from remote server:'), l)
1945 if resp == 1:
1945 if resp == 1:
1946 raise util.Abort(_('operation forbidden by server'))
1946 raise util.Abort(_('operation forbidden by server'))
1947 elif resp == 2:
1947 elif resp == 2:
1948 raise util.Abort(_('locking the remote repository failed'))
1948 raise util.Abort(_('locking the remote repository failed'))
1949 elif resp != 0:
1949 elif resp != 0:
1950 raise util.Abort(_('the server sent an unknown error code'))
1950 raise util.Abort(_('the server sent an unknown error code'))
1951 self.ui.status(_('streaming all changes\n'))
1951 self.ui.status(_('streaming all changes\n'))
1952 l = fp.readline()
1952 l = fp.readline()
1953 try:
1953 try:
1954 total_files, total_bytes = map(int, l.split(' ', 1))
1954 total_files, total_bytes = map(int, l.split(' ', 1))
1955 except ValueError, TypeError:
1955 except ValueError, TypeError:
1956 raise util.UnexpectedOutput(
1956 raise util.UnexpectedOutput(
1957 _('Unexpected response from remote server:'), l)
1957 _('Unexpected response from remote server:'), l)
1958 self.ui.status(_('%d files to transfer, %s of data\n') %
1958 self.ui.status(_('%d files to transfer, %s of data\n') %
1959 (total_files, util.bytecount(total_bytes)))
1959 (total_files, util.bytecount(total_bytes)))
1960 start = time.time()
1960 start = time.time()
1961 for i in xrange(total_files):
1961 for i in xrange(total_files):
1962 # XXX doesn't support '\n' or '\r' in filenames
1962 # XXX doesn't support '\n' or '\r' in filenames
1963 l = fp.readline()
1963 l = fp.readline()
1964 try:
1964 try:
1965 name, size = l.split('\0', 1)
1965 name, size = l.split('\0', 1)
1966 size = int(size)
1966 size = int(size)
1967 except ValueError, TypeError:
1967 except ValueError, TypeError:
1968 raise util.UnexpectedOutput(
1968 raise util.UnexpectedOutput(
1969 _('Unexpected response from remote server:'), l)
1969 _('Unexpected response from remote server:'), l)
1970 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1970 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1971 ofp = self.sopener(name, 'w')
1971 ofp = self.sopener(name, 'w')
1972 for chunk in util.filechunkiter(fp, limit=size):
1972 for chunk in util.filechunkiter(fp, limit=size):
1973 ofp.write(chunk)
1973 ofp.write(chunk)
1974 ofp.close()
1974 ofp.close()
1975 elapsed = time.time() - start
1975 elapsed = time.time() - start
1976 if elapsed <= 0:
1976 if elapsed <= 0:
1977 elapsed = 0.001
1977 elapsed = 0.001
1978 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1978 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1979 (util.bytecount(total_bytes), elapsed,
1979 (util.bytecount(total_bytes), elapsed,
1980 util.bytecount(total_bytes / elapsed)))
1980 util.bytecount(total_bytes / elapsed)))
1981 self.reload()
1981 self.reload()
1982 return len(self.heads()) + 1
1982 return len(self.heads()) + 1
1983
1983
1984 def clone(self, remote, heads=[], stream=False):
1984 def clone(self, remote, heads=[], stream=False):
1985 '''clone remote repository.
1985 '''clone remote repository.
1986
1986
1987 keyword arguments:
1987 keyword arguments:
1988 heads: list of revs to clone (forces use of pull)
1988 heads: list of revs to clone (forces use of pull)
1989 stream: use streaming clone if possible'''
1989 stream: use streaming clone if possible'''
1990
1990
1991 # now, all clients that can request uncompressed clones can
1991 # now, all clients that can request uncompressed clones can
1992 # read repo formats supported by all servers that can serve
1992 # read repo formats supported by all servers that can serve
1993 # them.
1993 # them.
1994
1994
1995 # if revlog format changes, client will have to check version
1995 # if revlog format changes, client will have to check version
1996 # and format flags on "stream" capability, and use
1996 # and format flags on "stream" capability, and use
1997 # uncompressed only if compatible.
1997 # uncompressed only if compatible.
1998
1998
1999 if stream and not heads and remote.capable('stream'):
1999 if stream and not heads and remote.capable('stream'):
2000 return self.stream_in(remote)
2000 return self.stream_in(remote)
2001 return self.pull(remote, heads)
2001 return self.pull(remote, heads)
2002
2002
2003 # used to avoid circular references so destructors work
2003 # used to avoid circular references so destructors work
2004 def aftertrans(files):
2004 def aftertrans(files):
2005 renamefiles = [tuple(t) for t in files]
2005 renamefiles = [tuple(t) for t in files]
2006 def a():
2006 def a():
2007 for src, dest in renamefiles:
2007 for src, dest in renamefiles:
2008 util.rename(src, dest)
2008 util.rename(src, dest)
2009 return a
2009 return a
2010
2010
2011 def instance(ui, path, create):
2011 def instance(ui, path, create):
2012 return localrepository(ui, util.drop_scheme('file', path), create)
2012 return localrepository(ui, util.drop_scheme('file', path), create)
2013
2013
2014 def islocal(path):
2014 def islocal(path):
2015 return True
2015 return True
@@ -1,85 +1,104
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 hg id
6 hg id
7 echo a > a
7 echo a > a
8 hg add a
8 hg add a
9 hg commit -m "test" -d "1000000 0"
9 hg commit -m "test" -d "1000000 0"
10 hg co
10 hg co
11 hg identify
11 hg identify
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
13 hg tag -l "This is a local tag with a really long name!"
13 hg tag -l "This is a local tag with a really long name!"
14 hg tags
14 hg tags
15 rm .hg/localtags
15 rm .hg/localtags
16 echo "$T first" > .hgtags
16 echo "$T first" > .hgtags
17 cat .hgtags
17 cat .hgtags
18 hg add .hgtags
18 hg add .hgtags
19 hg commit -m "add tags" -d "1000000 0"
19 hg commit -m "add tags" -d "1000000 0"
20 hg tags
20 hg tags
21 hg identify
21 hg identify
22 echo bb > a
22 echo bb > a
23 hg status
23 hg status
24 hg identify
24 hg identify
25 hg co first
25 hg co first
26 hg id
26 hg id
27 hg -v id
27 hg -v id
28 hg status
28 hg status
29 echo 1 > b
29 echo 1 > b
30 hg add b
30 hg add b
31 hg commit -m "branch" -d "1000000 0"
31 hg commit -m "branch" -d "1000000 0"
32 hg id
32 hg id
33 hg merge 1
33 hg merge 1
34 hg id
34 hg id
35 hg status
35 hg status
36
36
37 hg commit -m "merge" -d "1000000 0"
37 hg commit -m "merge" -d "1000000 0"
38
38
39 # create fake head, make sure tag not visible afterwards
39 # create fake head, make sure tag not visible afterwards
40 cp .hgtags tags
40 cp .hgtags tags
41 hg tag -d "1000000 0" last
41 hg tag -d "1000000 0" last
42 hg rm .hgtags
42 hg rm .hgtags
43 hg commit -m "remove" -d "1000000 0"
43 hg commit -m "remove" -d "1000000 0"
44
44
45 mv tags .hgtags
45 mv tags .hgtags
46 hg add .hgtags
46 hg add .hgtags
47 hg commit -m "readd" -d "1000000 0"
47 hg commit -m "readd" -d "1000000 0"
48
48
49 hg tags
49 hg tags
50
50
51 # invalid tags
51 # invalid tags
52 echo "spam" >> .hgtags
52 echo "spam" >> .hgtags
53 echo >> .hgtags
53 echo >> .hgtags
54 echo "foo bar" >> .hgtags
54 echo "foo bar" >> .hgtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
56 hg commit -m "tags" -d "1000000 0"
56 hg commit -m "tags" -d "1000000 0"
57
57
58 # report tag parse error on other head
58 # report tag parse error on other head
59 hg up 3
59 hg up 3
60 echo 'x y' >> .hgtags
60 echo 'x y' >> .hgtags
61 hg commit -m "head" -d "1000000 0"
61 hg commit -m "head" -d "1000000 0"
62
62
63 hg tags
63 hg tags
64 hg tip
64 hg tip
65
65
66 # test tag precedence rules
66 # test tag precedence rules
67 cd ..
67 cd ..
68 hg init t2
68 hg init t2
69 cd t2
69 cd t2
70 echo foo > foo
70 echo foo > foo
71 hg add foo
71 hg add foo
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
73 hg tag -d '1000000 0' bar # rev 1
73 hg tag -d '1000000 0' bar # rev 1
74 echo >> foo
74 echo >> foo
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
76 hg up -C 1
76 hg up -C 1
77 hg tag -r 1 -d '1000000 0' bar # rev 3
77 hg tag -r 1 -d '1000000 0' bar # rev 3
78 hg up -C 1
78 hg up -C 1
79 echo >> foo
79 echo >> foo
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
81 hg tags
81 hg tags
82
82
83 # test tag removal
83 hg tag --remove -d '1000000 0' bar
84 hg tag --remove -d '1000000 0' bar
84 hg tip
85 hg tip
85 hg tags
86 hg tags
87
88 # test tag rank
89 cd ..
90 hg init t3
91 cd t3
92 echo foo > foo
93 hg add foo
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
95 hg tag -d '1000000 0' bar # rev 1 bar -> 0
96 hg tag -d '1000000 0' bar # rev 2 bar -> 1
97 hg tag -d '1000000 0' -r 0 bar # rev 3 bar -> 0
98 hg tag -d '1000000 0' -r 1 bar # rev 3 bar -> 1
99 hg tag -d '1000000 0' -r 0 bar # rev 4 bar -> 0
100 hg tags
101 hg co 3
102 echo barbar > foo
103 hg ci -m 'change foo' -d '1000000 0' # rev 0
104 hg tags
@@ -1,51 +1,56 @@
1 unknown
1 unknown
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
3 0acdaf898367 tip
4 tip 0:0acdaf898367
4 tip 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 tip 1:8a3ca90d111d
7 tip 1:8a3ca90d111d
8 first 0:0acdaf898367
8 first 0:0acdaf898367
9 8a3ca90d111d tip
9 8a3ca90d111d tip
10 M a
10 M a
11 8a3ca90d111d+ tip
11 8a3ca90d111d+ tip
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 0acdaf898367+ first
13 0acdaf898367+ first
14 0acdaf898367+ first
14 0acdaf898367+ first
15 M a
15 M a
16 8216907a933d tip
16 8216907a933d tip
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 (branch merge, don't forget to commit)
18 (branch merge, don't forget to commit)
19 8216907a933d+8a3ca90d111d+ tip
19 8216907a933d+8a3ca90d111d+ tip
20 M .hgtags
20 M .hgtags
21 tip 6:e2174d339386
21 tip 6:e2174d339386
22 first 0:0acdaf898367
22 first 0:0acdaf898367
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
27 localtags, line 1: tag 'invalid' refers to unknown node
27 localtags, line 1: tag 'invalid' refers to unknown node
28 tip 8:4ca6f1b1a68c
28 tip 8:4ca6f1b1a68c
29 first 0:0acdaf898367
29 first 0:0acdaf898367
30 changeset: 8:4ca6f1b1a68c
30 changeset: 8:4ca6f1b1a68c
31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 localtags, line 1: tag 'invalid' refers to unknown node
34 localtags, line 1: tag 'invalid' refers to unknown node
35 tag: tip
35 tag: tip
36 parent: 3:b2ef3841386b
36 parent: 3:b2ef3841386b
37 user: test
37 user: test
38 date: Mon Jan 12 13:46:40 1970 +0000
38 date: Mon Jan 12 13:46:40 1970 +0000
39 summary: head
39 summary: head
40
40
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 tip 4:36195b728445
43 tip 4:36195b728445
44 bar 1:b204a97e6e8d
44 bar 1:b204a97e6e8d
45 changeset: 5:57e1983b4a60
45 changeset: 5:57e1983b4a60
46 tag: tip
46 tag: tip
47 user: test
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: Removed tag bar
49 summary: Removed tag bar
50
50
51 tip 5:57e1983b4a60
51 tip 5:57e1983b4a60
52 tip 5:d8bb4d1eff25
53 bar 0:b409d9da318e
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 tip 6:b5ff9d142648
56 bar 0:b409d9da318e
General Comments 0
You need to be logged in to leave comments. Login now