##// END OF EJS Templates
use .extend instead of .append in readtags
Alexis S. L. Carvalho -
r4482:99f411ba default
parent child Browse files
Show More
@@ -1,2020 +1,2020 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    # protocol capabilities this repository advertises to peers
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk layout requirements this client knows how to read
    supported = ('revlogv1', 'store')
21 def __del__(self):
21 def __del__(self):
22 self.transhandle = None
22 self.transhandle = None
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 if not path:
25 if not path:
26 p = os.getcwd()
26 p = os.getcwd()
27 while not os.path.isdir(os.path.join(p, ".hg")):
27 while not os.path.isdir(os.path.join(p, ".hg")):
28 oldp = p
28 oldp = p
29 p = os.path.dirname(p)
29 p = os.path.dirname(p)
30 if p == oldp:
30 if p == oldp:
31 raise repo.RepoError(_("There is no Mercurial repository"
31 raise repo.RepoError(_("There is no Mercurial repository"
32 " here (.hg not found)"))
32 " here (.hg not found)"))
33 path = p
33 path = p
34
34
35 self.root = os.path.realpath(path)
35 self.root = os.path.realpath(path)
36 self.path = os.path.join(self.root, ".hg")
36 self.path = os.path.join(self.root, ".hg")
37 self.origroot = path
37 self.origroot = path
38 self.opener = util.opener(self.path)
38 self.opener = util.opener(self.path)
39 self.wopener = util.opener(self.root)
39 self.wopener = util.opener(self.root)
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements = ("revlogv1", "store")
47 requirements = ("revlogv1", "store")
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 v = self.ui.configrevlog()
91 v = self.ui.configrevlog()
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 fl = v.get('flags', None)
94 fl = v.get('flags', None)
95 flags = 0
95 flags = 0
96 if fl != None:
96 if fl != None:
97 for x in fl.split():
97 for x in fl.split():
98 flags |= revlog.flagstr(x)
98 flags |= revlog.flagstr(x)
99 elif self.revlogv1:
99 elif self.revlogv1:
100 flags = revlog.REVLOG_DEFAULT_FLAGS
100 flags = revlog.REVLOG_DEFAULT_FLAGS
101
101
102 v = self.revlogversion | flags
102 v = self.revlogversion | flags
103 self.manifest = manifest.manifest(self.sopener, v)
103 self.manifest = manifest.manifest(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
104 self.changelog = changelog.changelog(self.sopener, v)
105
105
106 fallback = self.ui.config('ui', 'fallbackencoding')
106 fallback = self.ui.config('ui', 'fallbackencoding')
107 if fallback:
107 if fallback:
108 util._fallbackencoding = fallback
108 util._fallbackencoding = fallback
109
109
110 # the changelog might not have the inline index flag
110 # the changelog might not have the inline index flag
111 # on. If the format of the changelog is the same as found in
111 # on. If the format of the changelog is the same as found in
112 # .hgrc, apply any flags found in the .hgrc as well.
112 # .hgrc, apply any flags found in the .hgrc as well.
113 # Otherwise, just version from the changelog
113 # Otherwise, just version from the changelog
114 v = self.changelog.version
114 v = self.changelog.version
115 if v == self.revlogversion:
115 if v == self.revlogversion:
116 v |= flags
116 v |= flags
117 self.revlogversion = v
117 self.revlogversion = v
118
118
119 self.tagscache = None
119 self.tagscache = None
120 self.branchcache = None
120 self.branchcache = None
121 self.nodetagscache = None
121 self.nodetagscache = None
122 self.encodepats = None
122 self.encodepats = None
123 self.decodepats = None
123 self.decodepats = None
124 self.transhandle = None
124 self.transhandle = None
125
125
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def url(self):
128 def url(self):
129 return 'file:' + self.root
129 return 'file:' + self.root
130
130
131 def hook(self, name, throw=False, **args):
131 def hook(self, name, throw=False, **args):
132 def callhook(hname, funcname):
132 def callhook(hname, funcname):
133 '''call python hook. hook is callable object, looked up as
133 '''call python hook. hook is callable object, looked up as
134 name in python module. if callable returns "true", hook
134 name in python module. if callable returns "true", hook
135 fails, else passes. if hook raises exception, treated as
135 fails, else passes. if hook raises exception, treated as
136 hook failure. exception propagates if throw is "true".
136 hook failure. exception propagates if throw is "true".
137
137
138 reason for "true" meaning "hook failed" is so that
138 reason for "true" meaning "hook failed" is so that
139 unmodified commands (e.g. mercurial.commands.update) can
139 unmodified commands (e.g. mercurial.commands.update) can
140 be run as hooks without wrappers to convert return values.'''
140 be run as hooks without wrappers to convert return values.'''
141
141
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 d = funcname.rfind('.')
143 d = funcname.rfind('.')
144 if d == -1:
144 if d == -1:
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 % (hname, funcname))
146 % (hname, funcname))
147 modname = funcname[:d]
147 modname = funcname[:d]
148 try:
148 try:
149 obj = __import__(modname)
149 obj = __import__(modname)
150 except ImportError:
150 except ImportError:
151 try:
151 try:
152 # extensions are loaded with hgext_ prefix
152 # extensions are loaded with hgext_ prefix
153 obj = __import__("hgext_%s" % modname)
153 obj = __import__("hgext_%s" % modname)
154 except ImportError:
154 except ImportError:
155 raise util.Abort(_('%s hook is invalid '
155 raise util.Abort(_('%s hook is invalid '
156 '(import of "%s" failed)') %
156 '(import of "%s" failed)') %
157 (hname, modname))
157 (hname, modname))
158 try:
158 try:
159 for p in funcname.split('.')[1:]:
159 for p in funcname.split('.')[1:]:
160 obj = getattr(obj, p)
160 obj = getattr(obj, p)
161 except AttributeError, err:
161 except AttributeError, err:
162 raise util.Abort(_('%s hook is invalid '
162 raise util.Abort(_('%s hook is invalid '
163 '("%s" is not defined)') %
163 '("%s" is not defined)') %
164 (hname, funcname))
164 (hname, funcname))
165 if not callable(obj):
165 if not callable(obj):
166 raise util.Abort(_('%s hook is invalid '
166 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not callable)') %
167 '("%s" is not callable)') %
168 (hname, funcname))
168 (hname, funcname))
169 try:
169 try:
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 except (KeyboardInterrupt, util.SignalInterrupt):
171 except (KeyboardInterrupt, util.SignalInterrupt):
172 raise
172 raise
173 except Exception, exc:
173 except Exception, exc:
174 if isinstance(exc, util.Abort):
174 if isinstance(exc, util.Abort):
175 self.ui.warn(_('error: %s hook failed: %s\n') %
175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 (hname, exc.args[0]))
176 (hname, exc.args[0]))
177 else:
177 else:
178 self.ui.warn(_('error: %s hook raised an exception: '
178 self.ui.warn(_('error: %s hook raised an exception: '
179 '%s\n') % (hname, exc))
179 '%s\n') % (hname, exc))
180 if throw:
180 if throw:
181 raise
181 raise
182 self.ui.print_exc()
182 self.ui.print_exc()
183 return True
183 return True
184 if r:
184 if r:
185 if throw:
185 if throw:
186 raise util.Abort(_('%s hook failed') % hname)
186 raise util.Abort(_('%s hook failed') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 return r
188 return r
189
189
190 def runhook(name, cmd):
190 def runhook(name, cmd):
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 r = util.system(cmd, environ=env, cwd=self.root)
193 r = util.system(cmd, environ=env, cwd=self.root)
194 if r:
194 if r:
195 desc, r = util.explain_exit(r)
195 desc, r = util.explain_exit(r)
196 if throw:
196 if throw:
197 raise util.Abort(_('%s hook %s') % (name, desc))
197 raise util.Abort(_('%s hook %s') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 return r
199 return r
200
200
201 r = False
201 r = False
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 if hname.split(".", 1)[0] == name and cmd]
203 if hname.split(".", 1)[0] == name and cmd]
204 hooks.sort()
204 hooks.sort()
205 for hname, cmd in hooks:
205 for hname, cmd in hooks:
206 if cmd.startswith('python:'):
206 if cmd.startswith('python:'):
207 r = callhook(hname, cmd[7:].strip()) or r
207 r = callhook(hname, cmd[7:].strip()) or r
208 else:
208 else:
209 r = runhook(hname, cmd) or r
209 r = runhook(hname, cmd) or r
210 return r
210 return r
211
211
212 tag_disallowed = ':\r\n'
212 tag_disallowed = ':\r\n'
213
213
214 def tag(self, name, node, message, local, user, date):
214 def tag(self, name, node, message, local, user, date):
215 '''tag a revision with a symbolic name.
215 '''tag a revision with a symbolic name.
216
216
217 if local is True, the tag is stored in a per-repository file.
217 if local is True, the tag is stored in a per-repository file.
218 otherwise, it is stored in the .hgtags file, and a new
218 otherwise, it is stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tag in non-version-controlled file
223 local: whether to store tag in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for c in self.tag_disallowed:
232 for c in self.tag_disallowed:
233 if c in name:
233 if c in name:
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235
235
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237
237
238 if local:
238 if local:
239 # local tags are stored in the current charset
239 # local tags are stored in the current charset
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 self.hook('tag', node=hex(node), tag=name, local=local)
241 self.hook('tag', node=hex(node), tag=name, local=local)
242 return
242 return
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 # committed tags are stored in UTF-8
249 # committed tags are stored in UTF-8
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 self.wfile('.hgtags', 'ab').write(line)
251 self.wfile('.hgtags', 'ab').write(line)
252 if self.dirstate.state('.hgtags') == '?':
252 if self.dirstate.state('.hgtags') == '?':
253 self.add(['.hgtags'])
253 self.add(['.hgtags'])
254
254
255 self.commit(['.hgtags'], message, user, date)
255 self.commit(['.hgtags'], message, user, date)
256 self.hook('tag', node=hex(node), tag=name, local=local)
256 self.hook('tag', node=hex(node), tag=name, local=local)
257
257
258 def tags(self):
258 def tags(self):
259 '''return a mapping of tag to node'''
259 '''return a mapping of tag to node'''
260 if self.tagscache:
260 if self.tagscache:
261 return self.tagscache
261 return self.tagscache
262
262
263 globaltags = {}
263 globaltags = {}
264
264
265 def readtags(lines, fn):
265 def readtags(lines, fn):
266 filetags = {}
266 filetags = {}
267 count = 0
267 count = 0
268
268
269 def warn(msg):
269 def warn(msg):
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271
271
272 for l in lines:
272 for l in lines:
273 count += 1
273 count += 1
274 if not l:
274 if not l:
275 continue
275 continue
276 s = l.split(" ", 1)
276 s = l.split(" ", 1)
277 if len(s) != 2:
277 if len(s) != 2:
278 warn(_("cannot parse entry"))
278 warn(_("cannot parse entry"))
279 continue
279 continue
280 node, key = s
280 node, key = s
281 key = util.tolocal(key.strip()) # stored in UTF-8
281 key = util.tolocal(key.strip()) # stored in UTF-8
282 try:
282 try:
283 bin_n = bin(node)
283 bin_n = bin(node)
284 except TypeError:
284 except TypeError:
285 warn(_("node '%s' is not well formed") % node)
285 warn(_("node '%s' is not well formed") % node)
286 continue
286 continue
287 if bin_n not in self.changelog.nodemap:
287 if bin_n not in self.changelog.nodemap:
288 warn(_("tag '%s' refers to unknown node") % key)
288 warn(_("tag '%s' refers to unknown node") % key)
289 continue
289 continue
290
290
291 h = []
291 h = []
292 if key in filetags:
292 if key in filetags:
293 n, h = filetags[key]
293 n, h = filetags[key]
294 h.append(n)
294 h.append(n)
295 filetags[key] = (bin_n, h)
295 filetags[key] = (bin_n, h)
296
296
297 for k,nh in filetags.items():
297 for k,nh in filetags.items():
298 if k not in globaltags:
298 if k not in globaltags:
299 globaltags[k] = nh
299 globaltags[k] = nh
300 continue
300 continue
301 # we prefer the global tag if:
301 # we prefer the global tag if:
302 # it supercedes us OR
302 # it supercedes us OR
303 # mutual supercedes and it has a higher rank
303 # mutual supercedes and it has a higher rank
304 # otherwise we win because we're tip-most
304 # otherwise we win because we're tip-most
305 an, ah = nh
305 an, ah = nh
306 bn, bh = globaltags[k]
306 bn, bh = globaltags[k]
307 if bn != an and an in bh and \
307 if bn != an and an in bh and \
308 (bn not in ah or len(bh) > len(ah)):
308 (bn not in ah or len(bh) > len(ah)):
309 an = bn
309 an = bn
310 ah.append([n for n in bh if n not in ah])
310 ah.extend([n for n in bh if n not in ah])
311 globaltags[k] = an, ah
311 globaltags[k] = an, ah
312
312
313 # read the tags file from each head, ending with the tip
313 # read the tags file from each head, ending with the tip
314 f = None
314 f = None
315 for rev, node, fnode in self._hgtagsnodes():
315 for rev, node, fnode in self._hgtagsnodes():
316 f = (f and f.filectx(fnode) or
316 f = (f and f.filectx(fnode) or
317 self.filectx('.hgtags', fileid=fnode))
317 self.filectx('.hgtags', fileid=fnode))
318 readtags(f.data().splitlines(), f)
318 readtags(f.data().splitlines(), f)
319
319
320 try:
320 try:
321 data = util.fromlocal(self.opener("localtags").read())
321 data = util.fromlocal(self.opener("localtags").read())
322 # localtags are stored in the local character set
322 # localtags are stored in the local character set
323 # while the internal tag table is stored in UTF-8
323 # while the internal tag table is stored in UTF-8
324 readtags(data.splitlines(), "localtags")
324 readtags(data.splitlines(), "localtags")
325 except IOError:
325 except IOError:
326 pass
326 pass
327
327
328 self.tagscache = {}
328 self.tagscache = {}
329 for k,nh in globaltags.items():
329 for k,nh in globaltags.items():
330 n = nh[0]
330 n = nh[0]
331 if n != nullid:
331 if n != nullid:
332 self.tagscache[k] = n
332 self.tagscache[k] = n
333 self.tagscache['tip'] = self.changelog.tip()
333 self.tagscache['tip'] = self.changelog.tip()
334
334
335 return self.tagscache
335 return self.tagscache
336
336
337 def _hgtagsnodes(self):
337 def _hgtagsnodes(self):
338 heads = self.heads()
338 heads = self.heads()
339 heads.reverse()
339 heads.reverse()
340 last = {}
340 last = {}
341 ret = []
341 ret = []
342 for node in heads:
342 for node in heads:
343 c = self.changectx(node)
343 c = self.changectx(node)
344 rev = c.rev()
344 rev = c.rev()
345 try:
345 try:
346 fnode = c.filenode('.hgtags')
346 fnode = c.filenode('.hgtags')
347 except repo.LookupError:
347 except repo.LookupError:
348 continue
348 continue
349 ret.append((rev, node, fnode))
349 ret.append((rev, node, fnode))
350 if fnode in last:
350 if fnode in last:
351 ret[last[fnode]] = None
351 ret[last[fnode]] = None
352 last[fnode] = len(ret) - 1
352 last[fnode] = len(ret) - 1
353 return [item for item in ret if item]
353 return [item for item in ret if item]
354
354
355 def tagslist(self):
355 def tagslist(self):
356 '''return a list of tags ordered by revision'''
356 '''return a list of tags ordered by revision'''
357 l = []
357 l = []
358 for t, n in self.tags().items():
358 for t, n in self.tags().items():
359 try:
359 try:
360 r = self.changelog.rev(n)
360 r = self.changelog.rev(n)
361 except:
361 except:
362 r = -2 # sort to the beginning of the list if unknown
362 r = -2 # sort to the beginning of the list if unknown
363 l.append((r, t, n))
363 l.append((r, t, n))
364 l.sort()
364 l.sort()
365 return [(t, n) for r, t, n in l]
365 return [(t, n) for r, t, n in l]
366
366
367 def nodetags(self, node):
367 def nodetags(self, node):
368 '''return the tags associated with a node'''
368 '''return the tags associated with a node'''
369 if not self.nodetagscache:
369 if not self.nodetagscache:
370 self.nodetagscache = {}
370 self.nodetagscache = {}
371 for t, n in self.tags().items():
371 for t, n in self.tags().items():
372 self.nodetagscache.setdefault(n, []).append(t)
372 self.nodetagscache.setdefault(n, []).append(t)
373 return self.nodetagscache.get(node, [])
373 return self.nodetagscache.get(node, [])
374
374
375 def _branchtags(self):
375 def _branchtags(self):
376 partial, last, lrev = self._readbranchcache()
376 partial, last, lrev = self._readbranchcache()
377
377
378 tiprev = self.changelog.count() - 1
378 tiprev = self.changelog.count() - 1
379 if lrev != tiprev:
379 if lrev != tiprev:
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
380 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382
382
383 return partial
383 return partial
384
384
385 def branchtags(self):
385 def branchtags(self):
386 if self.branchcache is not None:
386 if self.branchcache is not None:
387 return self.branchcache
387 return self.branchcache
388
388
389 self.branchcache = {} # avoid recursion in changectx
389 self.branchcache = {} # avoid recursion in changectx
390 partial = self._branchtags()
390 partial = self._branchtags()
391
391
392 # the branch cache is stored on disk as UTF-8, but in the local
392 # the branch cache is stored on disk as UTF-8, but in the local
393 # charset internally
393 # charset internally
394 for k, v in partial.items():
394 for k, v in partial.items():
395 self.branchcache[util.tolocal(k)] = v
395 self.branchcache[util.tolocal(k)] = v
396 return self.branchcache
396 return self.branchcache
397
397
398 def _readbranchcache(self):
398 def _readbranchcache(self):
399 partial = {}
399 partial = {}
400 try:
400 try:
401 f = self.opener("branch.cache")
401 f = self.opener("branch.cache")
402 lines = f.read().split('\n')
402 lines = f.read().split('\n')
403 f.close()
403 f.close()
404 except (IOError, OSError):
404 except (IOError, OSError):
405 return {}, nullid, nullrev
405 return {}, nullid, nullrev
406
406
407 try:
407 try:
408 last, lrev = lines.pop(0).split(" ", 1)
408 last, lrev = lines.pop(0).split(" ", 1)
409 last, lrev = bin(last), int(lrev)
409 last, lrev = bin(last), int(lrev)
410 if not (lrev < self.changelog.count() and
410 if not (lrev < self.changelog.count() and
411 self.changelog.node(lrev) == last): # sanity check
411 self.changelog.node(lrev) == last): # sanity check
412 # invalidate the cache
412 # invalidate the cache
413 raise ValueError('Invalid branch cache: unknown tip')
413 raise ValueError('Invalid branch cache: unknown tip')
414 for l in lines:
414 for l in lines:
415 if not l: continue
415 if not l: continue
416 node, label = l.split(" ", 1)
416 node, label = l.split(" ", 1)
417 partial[label.strip()] = bin(node)
417 partial[label.strip()] = bin(node)
418 except (KeyboardInterrupt, util.SignalInterrupt):
418 except (KeyboardInterrupt, util.SignalInterrupt):
419 raise
419 raise
420 except Exception, inst:
420 except Exception, inst:
421 if self.ui.debugflag:
421 if self.ui.debugflag:
422 self.ui.warn(str(inst), '\n')
422 self.ui.warn(str(inst), '\n')
423 partial, last, lrev = {}, nullid, nullrev
423 partial, last, lrev = {}, nullid, nullrev
424 return partial, last, lrev
424 return partial, last, lrev
425
425
426 def _writebranchcache(self, branches, tip, tiprev):
426 def _writebranchcache(self, branches, tip, tiprev):
427 try:
427 try:
428 f = self.opener("branch.cache", "w", atomictemp=True)
428 f = self.opener("branch.cache", "w", atomictemp=True)
429 f.write("%s %s\n" % (hex(tip), tiprev))
429 f.write("%s %s\n" % (hex(tip), tiprev))
430 for label, node in branches.iteritems():
430 for label, node in branches.iteritems():
431 f.write("%s %s\n" % (hex(node), label))
431 f.write("%s %s\n" % (hex(node), label))
432 f.rename()
432 f.rename()
433 except (IOError, OSError):
433 except (IOError, OSError):
434 pass
434 pass
435
435
436 def _updatebranchcache(self, partial, start, end):
436 def _updatebranchcache(self, partial, start, end):
437 for r in xrange(start, end):
437 for r in xrange(start, end):
438 c = self.changectx(r)
438 c = self.changectx(r)
439 b = c.branch()
439 b = c.branch()
440 partial[b] = c.node()
440 partial[b] = c.node()
441
441
442 def lookup(self, key):
442 def lookup(self, key):
443 if key == '.':
443 if key == '.':
444 key = self.dirstate.parents()[0]
444 key = self.dirstate.parents()[0]
445 if key == nullid:
445 if key == nullid:
446 raise repo.RepoError(_("no revision checked out"))
446 raise repo.RepoError(_("no revision checked out"))
447 elif key == 'null':
447 elif key == 'null':
448 return nullid
448 return nullid
449 n = self.changelog._match(key)
449 n = self.changelog._match(key)
450 if n:
450 if n:
451 return n
451 return n
452 if key in self.tags():
452 if key in self.tags():
453 return self.tags()[key]
453 return self.tags()[key]
454 if key in self.branchtags():
454 if key in self.branchtags():
455 return self.branchtags()[key]
455 return self.branchtags()[key]
456 n = self.changelog._partialmatch(key)
456 n = self.changelog._partialmatch(key)
457 if n:
457 if n:
458 return n
458 return n
459 raise repo.RepoError(_("unknown revision '%s'") % key)
459 raise repo.RepoError(_("unknown revision '%s'") % key)
460
460
461 def dev(self):
461 def dev(self):
462 return os.lstat(self.path).st_dev
462 return os.lstat(self.path).st_dev
463
463
464 def local(self):
464 def local(self):
465 return True
465 return True
466
466
467 def join(self, f):
467 def join(self, f):
468 return os.path.join(self.path, f)
468 return os.path.join(self.path, f)
469
469
470 def sjoin(self, f):
470 def sjoin(self, f):
471 f = self.encodefn(f)
471 f = self.encodefn(f)
472 return os.path.join(self.spath, f)
472 return os.path.join(self.spath, f)
473
473
474 def wjoin(self, f):
474 def wjoin(self, f):
475 return os.path.join(self.root, f)
475 return os.path.join(self.root, f)
476
476
477 def file(self, f):
477 def file(self, f):
478 if f[0] == '/':
478 if f[0] == '/':
479 f = f[1:]
479 f = f[1:]
480 return filelog.filelog(self.sopener, f, self.revlogversion)
480 return filelog.filelog(self.sopener, f, self.revlogversion)
481
481
482 def changectx(self, changeid=None):
482 def changectx(self, changeid=None):
483 return context.changectx(self, changeid)
483 return context.changectx(self, changeid)
484
484
485 def workingctx(self):
485 def workingctx(self):
486 return context.workingctx(self)
486 return context.workingctx(self)
487
487
488 def parents(self, changeid=None):
488 def parents(self, changeid=None):
489 '''
489 '''
490 get list of changectxs for parents of changeid or working directory
490 get list of changectxs for parents of changeid or working directory
491 '''
491 '''
492 if changeid is None:
492 if changeid is None:
493 pl = self.dirstate.parents()
493 pl = self.dirstate.parents()
494 else:
494 else:
495 n = self.changelog.lookup(changeid)
495 n = self.changelog.lookup(changeid)
496 pl = self.changelog.parents(n)
496 pl = self.changelog.parents(n)
497 if pl[1] == nullid:
497 if pl[1] == nullid:
498 return [self.changectx(pl[0])]
498 return [self.changectx(pl[0])]
499 return [self.changectx(pl[0]), self.changectx(pl[1])]
499 return [self.changectx(pl[0]), self.changectx(pl[1])]
500
500
501 def filectx(self, path, changeid=None, fileid=None):
501 def filectx(self, path, changeid=None, fileid=None):
502 """changeid can be a changeset revision, node, or tag.
502 """changeid can be a changeset revision, node, or tag.
503 fileid can be a file revision or node."""
503 fileid can be a file revision or node."""
504 return context.filectx(self, path, changeid, fileid)
504 return context.filectx(self, path, changeid, fileid)
505
505
506 def getcwd(self):
506 def getcwd(self):
507 return self.dirstate.getcwd()
507 return self.dirstate.getcwd()
508
508
509 def wfile(self, f, mode='r'):
509 def wfile(self, f, mode='r'):
510 return self.wopener(f, mode)
510 return self.wopener(f, mode)
511
511
512 def wread(self, filename):
512 def wread(self, filename):
513 if self.encodepats == None:
513 if self.encodepats == None:
514 l = []
514 l = []
515 for pat, cmd in self.ui.configitems("encode"):
515 for pat, cmd in self.ui.configitems("encode"):
516 mf = util.matcher(self.root, "", [pat], [], [])[1]
516 mf = util.matcher(self.root, "", [pat], [], [])[1]
517 l.append((mf, cmd))
517 l.append((mf, cmd))
518 self.encodepats = l
518 self.encodepats = l
519
519
520 data = self.wopener(filename, 'r').read()
520 data = self.wopener(filename, 'r').read()
521
521
522 for mf, cmd in self.encodepats:
522 for mf, cmd in self.encodepats:
523 if mf(filename):
523 if mf(filename):
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 data = util.filter(data, cmd)
525 data = util.filter(data, cmd)
526 break
526 break
527
527
528 return data
528 return data
529
529
def wwrite(self, filename, data, fd=None):
    """Write data to filename in the working directory, applying
    [decode] filters.

    The (matcher, command) pairs from the [decode] config section are
    compiled lazily on first use and cached in self.decodepats; the
    first matching pattern's command filters the data.

    If fd is given, the filtered data is written to that file object;
    otherwise the file is opened through the working-dir opener.
    Returns whatever the underlying write() returns.
    """
    # Fix: identity comparison with the None singleton (PEP 8).
    if self.decodepats is None:
        l = []
        for pat, cmd in self.ui.configitems("decode"):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            l.append((mf, cmd))
        self.decodepats = l

    for mf, cmd in self.decodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    if fd:
        return fd.write(data)
    return self.wopener(filename, 'w').write(data)
def transaction(self):
    """Return a transaction for the repository store.

    If a transaction is already running, return a nested handle on it.
    Otherwise snapshot the dirstate to journal.dirstate (for rollback)
    and open a new journal-backed transaction; on close, the journal
    files are renamed to the undo files so the transaction can be
    rolled back later.
    """
    tr = self.transhandle
    # Fix: identity comparison with the None singleton (PEP 8).
    if tr is not None and tr.running():
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate yet (e.g. fresh repository): save an empty one
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    # on successful close, journal files become the undo files
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames))
    self.transhandle = tr
    return tr
def recover(self):
    """Roll back an interrupted transaction, if one exists.

    Returns True when a journal was found and rolled back, False
    otherwise.  The repository lock is held for the duration.
    """
    l = self.lock()
    journal = self.sjoin("journal")
    if not os.path.exists(journal):
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
    self.ui.status(_("rolling back interrupted transaction\n"))
    transaction.rollback(self.sopener, journal)
    self.reload()
    return True
def rollback(self, wlock=None):
    """Undo the last committed transaction using the undo files.

    Restores the store from the undo journal and the dirstate from
    undo.dirstate, then reloads the in-memory state.  Warns when no
    undo information is present.
    """
    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    undo = self.sjoin("undo")
    if not os.path.exists(undo):
        self.ui.warn(_("no rollback information available\n"))
        return
    self.ui.status(_("rolling back last transaction\n"))
    transaction.rollback(self.sopener, undo)
    util.rename(self.join("undo.dirstate"), self.join("dirstate"))
    self.reload()
    self.wreload()
def wreload(self):
    """Re-read the working-directory state (dirstate) from disk."""
    self.dirstate.read()
def reload(self):
    """Re-read changelog and manifest from disk and drop the tag caches."""
    for log in (self.changelog, self.manifest):
        log.load()
    self.tagscache = None
    self.nodetagscache = None
def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
            desc=None):
    """Acquire the lock file `lockname`.

    First tries a non-blocking acquire (timeout 0).  If the lock is
    held and `wait` is false, the LockHeld exception propagates;
    otherwise a warning naming the holder is printed and the acquire
    is retried with the configured ui.timeout (default 600 seconds).

    releasefn -- called when the lock is released
    acquirefn -- called here once the lock has been acquired
    desc      -- human-readable description used in messages

    Returns the held lock object; the caller keeps it alive for the
    duration of the critical section.
    """
    try:
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
def lock(self, wait=1):
    """Acquire the repository (store) lock.

    Reloads the in-memory store caches once the lock is acquired.
    """
    lockpath = self.sjoin("lock")
    desc = _('repository %s') % self.origroot
    return self.do_lock(lockpath, wait, acquirefn=self.reload, desc=desc)
def wlock(self, wait=1):
    """Acquire the working-directory lock.

    The dirstate is written back when the lock is released and
    re-read when the lock is acquired.
    """
    desc = _('working directory of %s') % self.origroot
    return self.do_lock(self.join("wlock"), wait,
                        self.dirstate.write, self.wreload,
                        desc=desc)
def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
    """
    commit an individual file as part of a larger transaction

    fn          -- file name; contents are read from the working directory
    manifest1   -- manifest of the first parent
    manifest2   -- manifest of the second parent (empty/falsy when this
                   is not a branch merge)
    linkrev     -- changelog revision the new filelog entry links to
    transaction -- active transaction journaling the write
    changelist  -- fn is appended to this list only when a new filelog
                   revision is actually created

    Returns the filelog node: either the existing parent node when the
    file is unmodified, or the node of the newly added revision.
    """

    t = self.wread(fn)
    fl = self.file(fn)
    # file-parent nodes according to each manifest (nullid if absent)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = self.dirstate.copied(fn)
    if cp:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        elif fp1 != nullid: # copied on local side, reversed
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = fp1
        else: # directory rename
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        self.ui.debug(_(" %s: copy %s:%s\n") %
                      (fn, cp, meta["copyrev"]))
        # copy metadata carries the real parent; filelog parent is null
        fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t):
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, transaction, linkrev, fp1, fp2)
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
    """Commit the given files verbatim (bypassing dirstate file selection).

    When p1 is not supplied, the dirstate parents are used instead.
    Delegates to commit(), which treats an explicit p1 as "raw" mode.
    """
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    args = dict(files=files, text=text, user=user, date=date,
                p1=p1, p2=p2, wlock=wlock)
    return self.commit(**args)
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False, p1=None, p2=None, extra={}):
    """Create a new changeset.

    Two modes of operation:
      * dirstate commit (p1 is None): the files to commit/remove come
        from `files` filtered through the dirstate, or from status()
        when `files` is empty; the dirstate is updated afterwards.
      * raw commit (p1 given, see rawcommit): `files` is used verbatim
        and the dirstate is only advanced when it already sits on p1.

    An editor is launched when no message is given or force_editor is
    set.  Returns the new changeset node, or None when nothing changed
    or the commit message ended up empty.

    NOTE(review): `extra={}` is a mutable default argument, but it is
    copied below before being modified, so the default is never mutated.
    """

    commit = []
    remove = []
    changed = []    # files for which a new filelog revision was created
    use_dirstate = (p1 is None) # not rawcommit
    extra = extra.copy()

    if use_dirstate:
        if files:
            # classify the explicitly listed files by dirstate status
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            changes = self.status(match=match)[:5]
            modified, added, removed, deleted, unknown = changes
            commit = modified + added
            remove = removed
    else:
        commit = files

    if use_dirstate:
        p1, p2 = self.dirstate.parents()
        update_dirstate = True
    else:
        p1, p2 = p1, p2 or nullid
        update_dirstate = (self.dirstate.parents()[0] == p1)

    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    if use_dirstate:
        branchname = self.workingctx().branch()
        try:
            # round-trip to validate that the branch name is UTF-8
            branchname = branchname.decode('UTF-8').encode('UTF-8')
        except UnicodeDecodeError:
            raise util.Abort(_('branch name not in UTF-8!'))
    else:
        branchname = ""

    if use_dirstate:
        oldname = c1[5].get("branch") # stored in UTF-8
        # a commit with no file changes is still allowed when forced,
        # when closing a merge (p2 set), or when the branch changed
        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

    xp1 = hex(p1)
    if p2 == nullid: xp2 = ''
    else: xp2 = hex(p2)

    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    for f in commit:
        self.ui.note(f + "\n")
        try:
            new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
            m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
        except IOError:
            if use_dirstate:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            else:
                # raw commit: a missing file is treated as removed
                remove.append(f)

    # update manifest
    m1.update(new)
    remove.sort()

    for f in remove:
        if f in m1:
            del m1[f]
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

    # add changeset
    new = new.keys()
    new.sort()

    user = user or self.ui.username()
    if not text or force_editor:
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        edittext.append("HG: user: %s" % user)
        if p2 != nullid:
            edittext.append("HG: branch merge")
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in remove])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # strip trailing whitespace and leading blank lines from the message
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    if branchname:
        extra["branch"] = branchname
    n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                           user, date, extra)
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    if use_dirstate or update_dirstate:
        self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    results are yielded in a tuple (src, filename), where src
    is one of:
    'f' the file was found in the directory tree
    'm' the file was only in the dirstate and not in the tree
    'b' file was not found and matched badmatch

    NOTE: `files=[]` is a mutable default, but the list is never
    mutated here, so the shared default is harmless.
    '''

    if node:
        # walking a committed revision: match against its manifest
        fdict = dict.fromkeys(files)
        for fn in self.manifest.read(self.changelog.read(node)[0]):
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    # safe despite mutating during iteration: we break
                    # out of the loop immediately after the delete
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # whatever is left in fdict was not found in the manifest
        for fn in fdict:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.root, self.getcwd(), fn), short(node)))
    else:
        # no node: walk the working directory via the dirstate
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
def status(self, node1=None, node2=None, files=[], match=util.always,
           wlock=None, list_ignored=False, list_clean=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted file lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    ignored/clean are only populated when list_ignored/list_clean are
    set (and deleted/unknown/ignored only apply to working-dir
    comparisons).
    """

    def fcmp(fn, mf):
        # full content comparison of working-dir fn vs manifest entry
        t1 = self.wread(fn)
        return self.file(fn).cmp(mf.get(fn, nullid), t1)

    def mfmatches(node):
        # manifest of `node` restricted to files accepted by `match`
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    # "compareworking": diffing the working dir against its own parent
    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    # are we comparing the working directory?
    if not node2:
        if not wlock:
            try:
                # the wlock lets us update the dirstate below; fall
                # back to a read-only status when it is unavailable
                wlock = self.wlock(wait=0)
            except lock.LockException:
                wlock = None
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(files, match,
                                                list_ignored, list_clean)

        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                # do a full compare of any files that might have changed
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup:
                    if fcmp(f, mf2):
                        modified.append(f)
                    else:
                        clean.append(f)
                        if wlock is not None:
                            # record as clean so future statuses can
                            # skip the expensive content compare
                            self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
            for f in removed:
                if f in mf2:
                    del mf2[f]
    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        mf2keys = mf2.keys()
        mf2keys.sort()
        for fn in mf2keys:
            if mf1.has_key(fn):
                # empty mf2 entry marks a working-dir file whose node is
                # unknown; fall back to a content compare in that case
                if mf1.flags(fn) != mf2.flags(fn) or \
                   (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                    modified.append(fn)
                elif list_clean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # whatever is left in mf1 exists only on the node1 side
        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
def add(self, list, wlock=None):
    """Schedule the given working-directory files for addition.

    Non-existent paths, non-regular files and already-tracked files
    are warned about and skipped; the rest are marked 'a' in the
    dirstate.
    """
    if not wlock:
        wlock = self.wlock()
    for name in list:
        path = self.wjoin(name)
        if not os.path.exists(path):
            self.ui.warn(_("%s does not exist!\n") % name)
            continue
        if not os.path.isfile(path):
            self.ui.warn(_("%s not added: only files supported currently\n")
                         % name)
            continue
        if self.dirstate.state(name) in 'an':
            self.ui.warn(_("%s already tracked!\n") % name)
            continue
        self.dirstate.update([name], "a")
def forget(self, list, wlock=None):
    """Undo a pending add for the given files.

    Only files in state 'a' (added) or 'i' are forgotten; anything
    else triggers a warning and is left alone.
    """
    if not wlock:
        wlock = self.wlock()
    for name in list:
        if self.dirstate.state(name) in 'ai':
            self.dirstate.forget([name])
        else:
            self.ui.warn(_("%s not added!\n") % name)
def remove(self, list, unlink=False, wlock=None):
    """Schedule the given files for removal.

    With unlink=True the files are first deleted from the working
    directory (a missing file is not an error).  Files still present
    on disk are warned about and skipped; files only scheduled for
    add are simply forgotten; untracked files are warned about;
    everything else is marked 'r' in the dirstate.
    """
    if unlink:
        for f in list:
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; any other failure propagates
                if inst.errno != errno.ENOENT:
                    raise
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        if os.path.exists(p):
            self.ui.warn(_("%s still exists!\n") % f)
        elif self.dirstate.state(f) == 'a':
            self.dirstate.forget([f])
        elif f not in self.dirstate:
            self.ui.warn(_("%s not tracked!\n") % f)
        else:
            self.dirstate.update([f], "r")
def undelete(self, list, wlock=None):
    """Restore files that are scheduled for removal.

    For each file in state 'r', the contents recorded in the first
    dirstate parent's manifest are written back to the working
    directory (with the recorded exec bit) and the file is marked
    normal again.  Files not scheduled for removal are warned about.
    """
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            # consistency fix: mark the message for translation with
            # _() like every other ui.warn message in this class
            self.ui.warn(_("%s not removed!\n") % f)
        else:
            t = self.file(f).read(m[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), m.execf(f))
            self.dirstate.update([f], "n")
def copy(self, source, dest, wlock=None):
    """Record that dest is a copy of source.

    dest must already exist as a regular file in the working
    directory; it is marked added if previously untracked, and the
    copy relationship is recorded in the dirstate.
    """
    p = self.wjoin(dest)
    if not os.path.exists(p):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not os.path.isfile(p):
        self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
def heads(self, start=None):
    """Return the changelog heads, ordered by descending revision."""
    # decorate-sort-undecorate on the negated revision number
    decorated = [(-self.changelog.rev(node), node)
                 for node in self.changelog.heads(start)]
    decorated.sort()
    return [node for (negrev, node) in decorated]
# branchlookup returns a dict giving a list of branches for
# each head.  A branch is defined as the tag of a node or
# the branch of the node's parents.  If a node has multiple
# branch tags, tags are eliminated if they are visible from other
# branch tags.
#
# So, for this graph:  a->b->c->d->e
#                       \         /
#                        aa -----/
# a has tag 2.6.12
# d has tag 2.6.13
# e would have branch tags for 2.6.12 and 2.6.13.  Because the node
# for 2.6.12 can be reached from the node 2.6.13, that is eliminated
# from the list.
#
# It is possible that more than one head will have the same branch tag.
# callers need to check the result for multiple heads under the same
# branch tag if that is a problem for them (ie checkout of a specific
# branch).
#
# passing in a specific branch will limit the depth of the search
# through the parents.  It won't limit the branches returned in the
# result though.
def branchlookup(self, heads=None, branch=None):
    """Map each head to the list of branch tags visible from it.

    A branch tag visible from a head is dropped if it is also
    reachable from another branch tag visible from that same head
    (see the long comment above this method for the full rules).
    Returns a dict {head-node: [tagname, ...]}; heads with no visible
    tags do not appear in the result.
    """
    if not heads:
        heads = self.heads()
    headt = [ h for h in heads ]
    chlog = self.changelog
    # branches: {node: {tagged-node: 1}} - records, for every node we
    # visit, which tagged nodes are visible (reachable) from it
    branches = {}
    merges = []      # (second-parent, found-so-far) walks still pending
    seenmerge = {}

    # traverse the tree once for each head, recording in the branches
    # dict which tags are visible from this head.  The branches
    # dict also records which tags are visible from each tag
    # while we traverse.
    while headt or merges:
        if merges:
            # resume a deferred walk at a merge's second parent;
            # 'found' carries the tagged nodes seen on the way down
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
            # NOTE: 'seen' is only reset when starting from a fresh
            # head; merge walks deliberately reuse the current one
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                for x in tags:
                    if x == 'tip':
                        continue
                    # mark this tagged node visible from everything
                    # found so far on this walk (and from itself)
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                # limit search depth: don't walk past the branch tag
                # the caller asked about
                if branch in tags:
                    continue
            seen[n] = 1
            # defer the second parent of a merge to a later walk
            if pp[1] != nullid and n not in seenmerge:
                merges.append((pp[1], [x for x in found]))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])
    # traverse the branches dict, eliminating branch tags from each
    # head that are visible from another branch tag for that head.
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # memoized transitive closure of 'branches' from 'node'
            if node in viscache:
                return viscache[node]
            ret = {}
            visit = [node]
            while visit:
                x = visit.pop()
                if x in viscache:
                    ret.update(viscache[x])
                elif x not in ret:
                    ret[x] = 1
                    if x in branches:
                        visit[len(visit):] = branches[x].keys()
            viscache[node] = ret
            return ret
        if h not in branches:
            continue
        # O(n^2), but somewhat limited.  This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h].keys():
                if b != bb:
                    if b in visible(bb):
                        vis = True
                        break
            if not vis:
                l = out.setdefault(h, [])
                l[len(l):] = self.nodetags(b)
    return out
1148
1148
def branches(self, nodes):
    """Describe the linear segment of history below each given node.

    For every starting node, first parents are followed until a merge
    (two real parents) or a root is reached.  Each segment is reported
    as a 4-tuple (tip, bottom, p1, p2): the node the walk started
    from, the last node of the linear run, and that node's parents.
    With an empty/None argument, the changelog tip is used.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    segments = []
    for start in nodes:
        cur = start
        while True:
            par = self.changelog.parents(cur)
            # a merge or the null root terminates the linear segment
            if par[1] != nullid or par[0] == nullid:
                segments.append((start, cur, par[0], par[1]))
                break
            cur = par[0]
    return segments
1162
1162
def between(self, pairs):
    """Sample nodes along the first-parent chain of each (top, bottom) pair.

    Walking from top towards bottom, the nodes at distances 1, 2, 4,
    8, ... from top are collected (exponential spacing, used by the
    discovery protocol's binary search).  Returns one such list per
    input pair.
    """
    results = []
    for top, bottom in pairs:
        sampled = []
        node = top
        steps = 0
        nextsample = 1
        while node != bottom:
            parent = self.changelog.parents(node)[0]
            # record this node if we've hit the next power-of-two distance
            if steps == nextsample:
                sampled.append(node)
                nextsample *= 2
            node = parent
            steps += 1
        results.append(sampled)
    return results
1181
1181
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []          # branch segments pending binary search
    fetch = {}           # earliest unknown changesets (the result)
    seen = {}            # branch heads already examined
    seenbranch = {}      # full branch tuples already examined
    if base == None:
        base = {}

    if not heads:
        heads = remote.heads()

    # local repo is empty: everything remote has is missing here
    if self.changelog.tip() == nullid:
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)  # nodes already requested from remote
    reqcnt = 0                    # round-trip counter (for debug output)

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []  # parents to ask the remote about in the next round
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue still-unknown parents for the next request
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                        (reqcnt, " ".join(map(short, r))))
            # batch requests in groups of 10 to bound message size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        # exponentially spaced samples between the branch head and root
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    # gap is small enough: p is the earliest unknown node
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    # narrow to the (p, i) range and search again
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1322
1322
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base == None:
        base = {}
        # no caller-supplied common-node set: compute one ourselves
        # (findincoming fills 'base' in as a side effect)
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start from every local node, then prune what remote has
    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1370
1370
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull missing changesets from the remote repository.

    heads restricts the pull to the given remote heads; this needs
    the 'changegroupsubset' capability on the remote.  A lock passed
    in by the caller is used as-is (and left held); otherwise this
    method takes its own lock and releases it before returning.
    Returns 0 when there is nothing to pull, otherwise whatever
    addchangegroup returns.
    """
    ownlock = not lock
    if ownlock:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # partial pull needs server-side subset support
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        # only release a lock we took ourselves
        if ownlock:
            lock.release()
1396
1396
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to the remote repository.

    Two transports exist: 'unbundle' lets the remote apply the bundle
    under its own lock (new ssh servers, http servers), while
    addchangegroup requires us to lock the remote ourselves (local
    filesystem, old ssh servers).  Prefer unbundle whenever the
    remote advertises the capability.
    """
    if remote.capable('unbundle'):
        sender = self.push_unbundle
    else:
        sender = self.push_addchangegroup
    return sender(remote, force, revs)
1409
1409
def prepush(self, remote, force, revs):
    """Analyse a pending push and build the changegroup to send.

    Returns (changegroup, remote_heads) when there is something to
    push, or (None, errcode) when there is nothing to push or the
    push would create new remote heads without force.
    """
    base = {}
    remote_heads = remote.heads()
    # side effect: fills 'base' with nodes common to both sides
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        warn = 0

        if remote_heads == [nullid]:
            # remote is empty, nothing to warn about
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            # simulate the post-push remote head set
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # no outgoing head descends from r: it stays a head
                        newheads.append(r)
                else:
                    # unknown locally: assume it remains a head
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))


    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1465
1465
def push_addchangegroup(self, remote, force, revs):
    """Push by taking the remote lock and applying a changegroup there.

    Returns addchangegroup's result on success, or prepush's error
    code when there is nothing to push or the push is refused.
    """
    # hold the remote's lock for the duration of the transfer; it is
    # released when the lock object goes away
    lock = remote.lock()

    outcome = self.prepush(remote, force, revs)
    cg = outcome[0]
    if cg is None:
        return outcome[1]
    return remote.addchangegroup(cg, 'push', self.url())
1474
1474
def push_unbundle(self, remote, force, revs):
    """Push by sending a bundle for the remote to apply itself.

    The remote re-checks its heads against the ones we observed; if
    they differ (someone else won a commit/push race) the remote
    aborts.  With force, the literal ['force'] token tells the remote
    to skip that check.  Returns unbundle's result on success, or
    prepush's error code otherwise.
    """
    outcome = self.prepush(remote, force, revs)
    cg = outcome[0]
    if cg is None:
        return outcome[1]
    heads_arg = outcome[1]
    if force:
        heads_arg = ['force']
    return remote.unbundle(cg, heads_arg, 'push')
1487
1487
def changegroupinfo(self, nodes):
    """Report how many changesets are being bundled; list each one at
    debug verbosity."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for node in nodes:
        self.ui.debug("%s\n" % hex(node))
1494
1494
1495 def changegroupsubset(self, bases, heads, source):
1495 def changegroupsubset(self, bases, heads, source):
1496 """This function generates a changegroup consisting of all the nodes
1496 """This function generates a changegroup consisting of all the nodes
1497 that are descendents of any of the bases, and ancestors of any of
1497 that are descendents of any of the bases, and ancestors of any of
1498 the heads.
1498 the heads.
1499
1499
1500 It is fairly complex as determining which filenodes and which
1500 It is fairly complex as determining which filenodes and which
1501 manifest nodes need to be included for the changeset to be complete
1501 manifest nodes need to be included for the changeset to be complete
1502 is non-trivial.
1502 is non-trivial.
1503
1503
1504 Another wrinkle is doing the reverse, figuring out which changeset in
1504 Another wrinkle is doing the reverse, figuring out which changeset in
1505 the changegroup a particular filenode or manifestnode belongs to."""
1505 the changegroup a particular filenode or manifestnode belongs to."""
1506
1506
1507 self.hook('preoutgoing', throw=True, source=source)
1507 self.hook('preoutgoing', throw=True, source=source)
1508
1508
1509 # Set up some initial variables
1509 # Set up some initial variables
1510 # Make it easy to refer to self.changelog
1510 # Make it easy to refer to self.changelog
1511 cl = self.changelog
1511 cl = self.changelog
1512 # msng is short for missing - compute the list of changesets in this
1512 # msng is short for missing - compute the list of changesets in this
1513 # changegroup.
1513 # changegroup.
1514 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1514 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1515 self.changegroupinfo(msng_cl_lst)
1515 self.changegroupinfo(msng_cl_lst)
1516 # Some bases may turn out to be superfluous, and some heads may be
1516 # Some bases may turn out to be superfluous, and some heads may be
1517 # too. nodesbetween will return the minimal set of bases and heads
1517 # too. nodesbetween will return the minimal set of bases and heads
1518 # necessary to re-create the changegroup.
1518 # necessary to re-create the changegroup.
1519
1519
1520 # Known heads are the list of heads that it is assumed the recipient
1520 # Known heads are the list of heads that it is assumed the recipient
1521 # of this changegroup will know about.
1521 # of this changegroup will know about.
1522 knownheads = {}
1522 knownheads = {}
1523 # We assume that all parents of bases are known heads.
1523 # We assume that all parents of bases are known heads.
1524 for n in bases:
1524 for n in bases:
1525 for p in cl.parents(n):
1525 for p in cl.parents(n):
1526 if p != nullid:
1526 if p != nullid:
1527 knownheads[p] = 1
1527 knownheads[p] = 1
1528 knownheads = knownheads.keys()
1528 knownheads = knownheads.keys()
1529 if knownheads:
1529 if knownheads:
1530 # Now that we know what heads are known, we can compute which
1530 # Now that we know what heads are known, we can compute which
1531 # changesets are known. The recipient must know about all
1531 # changesets are known. The recipient must know about all
1532 # changesets required to reach the known heads from the null
1532 # changesets required to reach the known heads from the null
1533 # changeset.
1533 # changeset.
1534 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1534 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1535 junk = None
1535 junk = None
1536 # Transform the list into an ersatz set.
1536 # Transform the list into an ersatz set.
1537 has_cl_set = dict.fromkeys(has_cl_set)
1537 has_cl_set = dict.fromkeys(has_cl_set)
1538 else:
1538 else:
1539 # If there were no known heads, the recipient cannot be assumed to
1539 # If there were no known heads, the recipient cannot be assumed to
1540 # know about any changesets.
1540 # know about any changesets.
1541 has_cl_set = {}
1541 has_cl_set = {}
1542
1542
1543 # Make it easy to refer to self.manifest
1543 # Make it easy to refer to self.manifest
1544 mnfst = self.manifest
1544 mnfst = self.manifest
1545 # We don't know which manifests are missing yet
1545 # We don't know which manifests are missing yet
1546 msng_mnfst_set = {}
1546 msng_mnfst_set = {}
1547 # Nor do we know which filenodes are missing.
1547 # Nor do we know which filenodes are missing.
1548 msng_filenode_set = {}
1548 msng_filenode_set = {}
1549
1549
1550 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1550 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1551 junk = None
1551 junk = None
1552
1552
1553 # A changeset always belongs to itself, so the changenode lookup
1553 # A changeset always belongs to itself, so the changenode lookup
1554 # function for a changenode is identity.
1554 # function for a changenode is identity.
1555 def identity(x):
1555 def identity(x):
1556 return x
1556 return x
1557
1557
1558 # A function generating function. Sets up an environment for the
1558 # A function generating function. Sets up an environment for the
1559 # inner function.
1559 # inner function.
1560 def cmp_by_rev_func(revlog):
1560 def cmp_by_rev_func(revlog):
1561 # Compare two nodes by their revision number in the environment's
1561 # Compare two nodes by their revision number in the environment's
1562 # revision history. Since the revision number both represents the
1562 # revision history. Since the revision number both represents the
1563 # most efficient order to read the nodes in, and represents a
1563 # most efficient order to read the nodes in, and represents a
1564 # topological sorting of the nodes, this function is often useful.
1564 # topological sorting of the nodes, this function is often useful.
1565 def cmp_by_rev(a, b):
1565 def cmp_by_rev(a, b):
1566 return cmp(revlog.rev(a), revlog.rev(b))
1566 return cmp(revlog.rev(a), revlog.rev(b))
1567 return cmp_by_rev
1567 return cmp_by_rev
1568
1568
1569 # If we determine that a particular file or manifest node must be a
1569 # If we determine that a particular file or manifest node must be a
1570 # node that the recipient of the changegroup will already have, we can
1570 # node that the recipient of the changegroup will already have, we can
1571 # also assume the recipient will have all the parents. This function
1571 # also assume the recipient will have all the parents. This function
1572 # prunes them from the set of missing nodes.
1572 # prunes them from the set of missing nodes.
1573 def prune_parents(revlog, hasset, msngset):
1573 def prune_parents(revlog, hasset, msngset):
1574 haslst = hasset.keys()
1574 haslst = hasset.keys()
1575 haslst.sort(cmp_by_rev_func(revlog))
1575 haslst.sort(cmp_by_rev_func(revlog))
1576 for node in haslst:
1576 for node in haslst:
1577 parentlst = [p for p in revlog.parents(node) if p != nullid]
1577 parentlst = [p for p in revlog.parents(node) if p != nullid]
1578 while parentlst:
1578 while parentlst:
1579 n = parentlst.pop()
1579 n = parentlst.pop()
1580 if n not in hasset:
1580 if n not in hasset:
1581 hasset[n] = 1
1581 hasset[n] = 1
1582 p = [p for p in revlog.parents(n) if p != nullid]
1582 p = [p for p in revlog.parents(n) if p != nullid]
1583 parentlst.extend(p)
1583 parentlst.extend(p)
1584 for n in hasset:
1584 for n in hasset:
1585 msngset.pop(n, None)
1585 msngset.pop(n, None)
1586
1586
1587 # This is a function generating function used to set up an environment
1587 # This is a function generating function used to set up an environment
1588 # for the inner function to execute in.
1588 # for the inner function to execute in.
1589 def manifest_and_file_collector(changedfileset):
1589 def manifest_and_file_collector(changedfileset):
1590 # This is an information gathering function that gathers
1590 # This is an information gathering function that gathers
1591 # information from each changeset node that goes out as part of
1591 # information from each changeset node that goes out as part of
1592 # the changegroup. The information gathered is a list of which
1592 # the changegroup. The information gathered is a list of which
1593 # manifest nodes are potentially required (the recipient may
1593 # manifest nodes are potentially required (the recipient may
1594 # already have them) and total list of all files which were
1594 # already have them) and total list of all files which were
1595 # changed in any changeset in the changegroup.
1595 # changed in any changeset in the changegroup.
1596 #
1596 #
1597 # We also remember the first changenode we saw any manifest
1597 # We also remember the first changenode we saw any manifest
1598 # referenced by so we can later determine which changenode 'owns'
1598 # referenced by so we can later determine which changenode 'owns'
1599 # the manifest.
1599 # the manifest.
1600 def collect_manifests_and_files(clnode):
1600 def collect_manifests_and_files(clnode):
1601 c = cl.read(clnode)
1601 c = cl.read(clnode)
1602 for f in c[3]:
1602 for f in c[3]:
1603 # This is to make sure we only have one instance of each
1603 # This is to make sure we only have one instance of each
1604 # filename string for each filename.
1604 # filename string for each filename.
1605 changedfileset.setdefault(f, f)
1605 changedfileset.setdefault(f, f)
1606 msng_mnfst_set.setdefault(c[0], clnode)
1606 msng_mnfst_set.setdefault(c[0], clnode)
1607 return collect_manifests_and_files
1607 return collect_manifests_and_files
1608
1608
1609 # Figure out which manifest nodes (of the ones we think might be part
1609 # Figure out which manifest nodes (of the ones we think might be part
1610 # of the changegroup) the recipient must know about and remove them
1610 # of the changegroup) the recipient must know about and remove them
1611 # from the changegroup.
1611 # from the changegroup.
1612 def prune_manifests():
1612 def prune_manifests():
1613 has_mnfst_set = {}
1613 has_mnfst_set = {}
1614 for n in msng_mnfst_set:
1614 for n in msng_mnfst_set:
1615 # If a 'missing' manifest thinks it belongs to a changenode
1615 # If a 'missing' manifest thinks it belongs to a changenode
1616 # the recipient is assumed to have, obviously the recipient
1616 # the recipient is assumed to have, obviously the recipient
1617 # must have that manifest.
1617 # must have that manifest.
1618 linknode = cl.node(mnfst.linkrev(n))
1618 linknode = cl.node(mnfst.linkrev(n))
1619 if linknode in has_cl_set:
1619 if linknode in has_cl_set:
1620 has_mnfst_set[n] = 1
1620 has_mnfst_set[n] = 1
1621 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1621 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1622
1622
1623 # Use the information collected in collect_manifests_and_files to say
1623 # Use the information collected in collect_manifests_and_files to say
1624 # which changenode any manifestnode belongs to.
1624 # which changenode any manifestnode belongs to.
1625 def lookup_manifest_link(mnfstnode):
1625 def lookup_manifest_link(mnfstnode):
1626 return msng_mnfst_set[mnfstnode]
1626 return msng_mnfst_set[mnfstnode]
1627
1627
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # Revision number we expect to see next; kept in a one-element
            # list so the nested function can rebind it (no 'nonlocal' in
            # this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        # (manifest lines are "<file>\0<hex node><flags>")
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        # map back to the canonical shared filename string,
                        # or None if this file is not of interest
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1677
1677
1678 # We have a list of filenodes we think we need for a file, lets remove
1678 # We have a list of filenodes we think we need for a file, lets remove
1679 # all those we now the recipient must have.
1679 # all those we now the recipient must have.
1680 def prune_filenodes(f, filerevlog):
1680 def prune_filenodes(f, filerevlog):
1681 msngset = msng_filenode_set[f]
1681 msngset = msng_filenode_set[f]
1682 hasset = {}
1682 hasset = {}
1683 # If a 'missing' filenode thinks it belongs to a changenode we
1683 # If a 'missing' filenode thinks it belongs to a changenode we
1684 # assume the recipient must have, then the recipient must have
1684 # assume the recipient must have, then the recipient must have
1685 # that filenode.
1685 # that filenode.
1686 for n in msngset:
1686 for n in msngset:
1687 clnode = cl.node(filerevlog.linkrev(n))
1687 clnode = cl.node(filerevlog.linkrev(n))
1688 if clnode in has_cl_set:
1688 if clnode in has_cl_set:
1689 hasset[n] = 1
1689 hasset[n] = 1
1690 prune_parents(filerevlog, hasset, msngset)
1690 prune_parents(filerevlog, hasset, msngset)
1691
1691
1692 # A function generator function that sets up the a context for the
1692 # A function generator function that sets up the a context for the
1693 # inner function.
1693 # inner function.
1694 def lookup_filenode_link_func(fname):
1694 def lookup_filenode_link_func(fname):
1695 msngset = msng_filenode_set[fname]
1695 msngset = msng_filenode_set[fname]
1696 # Lookup the changenode the filenode belongs to.
1696 # Lookup the changenode the filenode belongs to.
1697 def lookup_filenode_link(fnode):
1697 def lookup_filenode_link(fnode):
1698 return msngset[fnode]
1698 return msngset[fnode]
1699 return lookup_filenode_link
1699 return lookup_filenode_link
1700
1700
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # Generator yielding the raw changegroup stream: changelog
            # chunks, then manifest chunks, then per-file chunk groups,
            # each group terminated by a close chunk.  The yield order IS
            # the wire format — do not reorder.

            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        # Fire the outgoing hook on the first (oldest) outgoing changeset.
        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        # Wrap the generator so callers can read it as a file-like stream.
        return util.chunkbuffer(gengroup())
1766
1766
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient already has; everything between
        them and our heads is sent.  source: tag passed through to the
        'preoutgoing'/'outgoing' hooks.  Returns a util.chunkbuffer
        wrapping the generated stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revision numbers going out, used to filter
        # manifest/file revisions by their linkrev
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # trivial node->linknode lookup: changelog nodes link to
            # themselves
            return x

        def gennodelst(revlog):
            # yield, in revision order, the nodes of revlog whose
            # linkrev falls inside the outgoing changeset set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # per-changeset callback recording every file touched
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of revlog to its owning changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # Yields the wire stream: changelog, manifests, then one
            # chunk group per changed file, then a close chunk.
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can test for emptiness before
                # emitting the filename chunk
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1833
1833
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: file-like object containing the changegroup stream.
        srctype/url: where the data came from, passed to hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # progress callback for changelog.addgroup; returns the next
            # revision number to be assigned
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number (used as the
            # linkrev for manifest/file revisions)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last revision before/after adding the group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file list
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # atomically publish the buffered changelog data
            cl.writedata()
        finally:
            # always release the appendfile, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may abort and roll back the transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-commit notification hooks: one changegroup hook for
            # the whole group, one incoming hook per changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1940
1940
1941
1941
1942 def stream_in(self, remote):
1942 def stream_in(self, remote):
1943 fp = remote.stream_out()
1943 fp = remote.stream_out()
1944 l = fp.readline()
1944 l = fp.readline()
1945 try:
1945 try:
1946 resp = int(l)
1946 resp = int(l)
1947 except ValueError:
1947 except ValueError:
1948 raise util.UnexpectedOutput(
1948 raise util.UnexpectedOutput(
1949 _('Unexpected response from remote server:'), l)
1949 _('Unexpected response from remote server:'), l)
1950 if resp == 1:
1950 if resp == 1:
1951 raise util.Abort(_('operation forbidden by server'))
1951 raise util.Abort(_('operation forbidden by server'))
1952 elif resp == 2:
1952 elif resp == 2:
1953 raise util.Abort(_('locking the remote repository failed'))
1953 raise util.Abort(_('locking the remote repository failed'))
1954 elif resp != 0:
1954 elif resp != 0:
1955 raise util.Abort(_('the server sent an unknown error code'))
1955 raise util.Abort(_('the server sent an unknown error code'))
1956 self.ui.status(_('streaming all changes\n'))
1956 self.ui.status(_('streaming all changes\n'))
1957 l = fp.readline()
1957 l = fp.readline()
1958 try:
1958 try:
1959 total_files, total_bytes = map(int, l.split(' ', 1))
1959 total_files, total_bytes = map(int, l.split(' ', 1))
1960 except ValueError, TypeError:
1960 except ValueError, TypeError:
1961 raise util.UnexpectedOutput(
1961 raise util.UnexpectedOutput(
1962 _('Unexpected response from remote server:'), l)
1962 _('Unexpected response from remote server:'), l)
1963 self.ui.status(_('%d files to transfer, %s of data\n') %
1963 self.ui.status(_('%d files to transfer, %s of data\n') %
1964 (total_files, util.bytecount(total_bytes)))
1964 (total_files, util.bytecount(total_bytes)))
1965 start = time.time()
1965 start = time.time()
1966 for i in xrange(total_files):
1966 for i in xrange(total_files):
1967 # XXX doesn't support '\n' or '\r' in filenames
1967 # XXX doesn't support '\n' or '\r' in filenames
1968 l = fp.readline()
1968 l = fp.readline()
1969 try:
1969 try:
1970 name, size = l.split('\0', 1)
1970 name, size = l.split('\0', 1)
1971 size = int(size)
1971 size = int(size)
1972 except ValueError, TypeError:
1972 except ValueError, TypeError:
1973 raise util.UnexpectedOutput(
1973 raise util.UnexpectedOutput(
1974 _('Unexpected response from remote server:'), l)
1974 _('Unexpected response from remote server:'), l)
1975 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1975 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1976 ofp = self.sopener(name, 'w')
1976 ofp = self.sopener(name, 'w')
1977 for chunk in util.filechunkiter(fp, limit=size):
1977 for chunk in util.filechunkiter(fp, limit=size):
1978 ofp.write(chunk)
1978 ofp.write(chunk)
1979 ofp.close()
1979 ofp.close()
1980 elapsed = time.time() - start
1980 elapsed = time.time() - start
1981 if elapsed <= 0:
1981 if elapsed <= 0:
1982 elapsed = 0.001
1982 elapsed = 0.001
1983 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1983 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1984 (util.bytecount(total_bytes), elapsed,
1984 (util.bytecount(total_bytes), elapsed,
1985 util.bytecount(total_bytes / elapsed)))
1985 util.bytecount(total_bytes / elapsed)))
1986 self.reload()
1986 self.reload()
1987 return len(self.heads()) + 1
1987 return len(self.heads()) + 1
1988
1988
1989 def clone(self, remote, heads=[], stream=False):
1989 def clone(self, remote, heads=[], stream=False):
1990 '''clone remote repository.
1990 '''clone remote repository.
1991
1991
1992 keyword arguments:
1992 keyword arguments:
1993 heads: list of revs to clone (forces use of pull)
1993 heads: list of revs to clone (forces use of pull)
1994 stream: use streaming clone if possible'''
1994 stream: use streaming clone if possible'''
1995
1995
1996 # now, all clients that can request uncompressed clones can
1996 # now, all clients that can request uncompressed clones can
1997 # read repo formats supported by all servers that can serve
1997 # read repo formats supported by all servers that can serve
1998 # them.
1998 # them.
1999
1999
2000 # if revlog format changes, client will have to check version
2000 # if revlog format changes, client will have to check version
2001 # and format flags on "stream" capability, and use
2001 # and format flags on "stream" capability, and use
2002 # uncompressed only if compatible.
2002 # uncompressed only if compatible.
2003
2003
2004 if stream and not heads and remote.capable('stream'):
2004 if stream and not heads and remote.capable('stream'):
2005 return self.stream_in(remote)
2005 return self.stream_in(remote)
2006 return self.pull(remote, heads)
2006 return self.pull(remote, heads)
2007
2007
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into plain tuples immediately so the closure
    does not hold onto the caller's structure.
    """
    pending = [tuple(pair) for pair in files]
    def rename_all():
        for src, dest in pending:
            util.rename(src, dest)
    return rename_all
2015
2015
def instance(ui, path, create):
    # Open (or create) a local repository, stripping an optional
    # "file:" scheme prefix from the path first.
    return localrepository(ui, util.drop_scheme('file', path), create)
2018
2018
def islocal(path):
    # Repositories handled by this module are always local.
    return True
@@ -1,107 +1,124 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir t
3 mkdir t
4 cd t
4 cd t
5 hg init
5 hg init
6 hg id
6 hg id
7 echo a > a
7 echo a > a
8 hg add a
8 hg add a
9 hg commit -m "test" -d "1000000 0"
9 hg commit -m "test" -d "1000000 0"
10 hg co
10 hg co
11 hg identify
11 hg identify
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
13 hg tag -l "This is a local tag with a really long name!"
13 hg tag -l "This is a local tag with a really long name!"
14 hg tags
14 hg tags
15 rm .hg/localtags
15 rm .hg/localtags
16 echo "$T first" > .hgtags
16 echo "$T first" > .hgtags
17 cat .hgtags
17 cat .hgtags
18 hg add .hgtags
18 hg add .hgtags
19 hg commit -m "add tags" -d "1000000 0"
19 hg commit -m "add tags" -d "1000000 0"
20 hg tags
20 hg tags
21 hg identify
21 hg identify
22 echo bb > a
22 echo bb > a
23 hg status
23 hg status
24 hg identify
24 hg identify
25 hg co first
25 hg co first
26 hg id
26 hg id
27 hg -v id
27 hg -v id
28 hg status
28 hg status
29 echo 1 > b
29 echo 1 > b
30 hg add b
30 hg add b
31 hg commit -m "branch" -d "1000000 0"
31 hg commit -m "branch" -d "1000000 0"
32 hg id
32 hg id
33 hg merge 1
33 hg merge 1
34 hg id
34 hg id
35 hg status
35 hg status
36
36
37 hg commit -m "merge" -d "1000000 0"
37 hg commit -m "merge" -d "1000000 0"
38
38
39 # create fake head, make sure tag not visible afterwards
39 # create fake head, make sure tag not visible afterwards
40 cp .hgtags tags
40 cp .hgtags tags
41 hg tag -d "1000000 0" last
41 hg tag -d "1000000 0" last
42 hg rm .hgtags
42 hg rm .hgtags
43 hg commit -m "remove" -d "1000000 0"
43 hg commit -m "remove" -d "1000000 0"
44
44
45 mv tags .hgtags
45 mv tags .hgtags
46 hg add .hgtags
46 hg add .hgtags
47 hg commit -m "readd" -d "1000000 0"
47 hg commit -m "readd" -d "1000000 0"
48
48
49 hg tags
49 hg tags
50
50
51 # invalid tags
51 # invalid tags
52 echo "spam" >> .hgtags
52 echo "spam" >> .hgtags
53 echo >> .hgtags
53 echo >> .hgtags
54 echo "foo bar" >> .hgtags
54 echo "foo bar" >> .hgtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
56 hg commit -m "tags" -d "1000000 0"
56 hg commit -m "tags" -d "1000000 0"
57
57
58 # report tag parse error on other head
58 # report tag parse error on other head
59 hg up 3
59 hg up 3
60 echo 'x y' >> .hgtags
60 echo 'x y' >> .hgtags
61 hg commit -m "head" -d "1000000 0"
61 hg commit -m "head" -d "1000000 0"
62
62
63 hg tags
63 hg tags
64 hg tip
64 hg tip
65
65
66 # test tag precedence rules
66 # test tag precedence rules
67 cd ..
67 cd ..
68 hg init t2
68 hg init t2
69 cd t2
69 cd t2
70 echo foo > foo
70 echo foo > foo
71 hg add foo
71 hg add foo
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
72 hg ci -m 'add foo' -d '1000000 0' # rev 0
73 hg tag -d '1000000 0' bar # rev 1
73 hg tag -d '1000000 0' bar # rev 1
74 echo >> foo
74 echo >> foo
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
76 hg up -C 1
76 hg up -C 1
77 hg tag -r 1 -d '1000000 0' -f bar # rev 3
77 hg tag -r 1 -d '1000000 0' -f bar # rev 3
78 hg up -C 1
78 hg up -C 1
79 echo >> foo
79 echo >> foo
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
81 hg tags
81 hg tags
82
82
83 # test tag removal
83 # test tag removal
84 hg tag --remove -d '1000000 0' bar
84 hg tag --remove -d '1000000 0' bar
85 hg tip
85 hg tip
86 hg tags
86 hg tags
87
87
88 # test tag rank
88 # test tag rank
89 cd ..
89 cd ..
90 hg init t3
90 hg init t3
91 cd t3
91 cd t3
92 echo foo > foo
92 echo foo > foo
93 hg add foo
93 hg add foo
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
95 hg tag -d '1000000 0' -f bar # rev 1 bar -> 0
95 hg tag -d '1000000 0' -f bar # rev 1 bar -> 0
96 hg tag -d '1000000 0' -f bar # rev 2 bar -> 1
96 hg tag -d '1000000 0' -f bar # rev 2 bar -> 1
97 hg tag -d '1000000 0' -fr 0 bar # rev 3 bar -> 0
97 hg tag -d '1000000 0' -fr 0 bar # rev 3 bar -> 0
98 hg tag -d '1000000 0' -fr 1 bar # rev 3 bar -> 1
98 hg tag -d '1000000 0' -fr 1 bar # rev 3 bar -> 1
99 hg tag -d '1000000 0' -fr 0 bar # rev 4 bar -> 0
99 hg tag -d '1000000 0' -fr 0 bar # rev 4 bar -> 0
100 hg tags
100 hg tags
101 hg co 3
101 hg co 3
102 echo barbar > foo
102 echo barbar > foo
103 hg ci -m 'change foo' -d '1000000 0' # rev 0
103 hg ci -m 'change foo' -d '1000000 0' # rev 0
104 hg tags
104 hg tags
105
105
106 hg tag -d '1000000 0' -r 3 bar # should complain
106 hg tag -d '1000000 0' -r 3 bar # should complain
107 hg tags No newline at end of file
107 hg tags
108
109 # test tag rank with 3 heads
110 cd ..
111 hg init t4
112 cd t4
113 echo foo > foo
114 hg add
115 hg ci -m 'add foo' -d '0 0' # rev 0
116 hg tag -d '0 0' bar # rev 1 bar -> 0
117 hg tag -d '0 0' -f bar # rev 2 bar -> 1
118 hg up -qC 0
119 hg tag -d '0 0' -fr 2 bar # rev 3 bar -> 2
120 hg tags
121 hg up -qC 0
122 hg tag -d '0 0' -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
123 echo % bar should still point to rev 2
124 hg tags
@@ -1,59 +1,65 b''
1 unknown
1 unknown
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 0acdaf898367 tip
3 0acdaf898367 tip
4 tip 0:0acdaf898367
4 tip 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
5 This is a local tag with a really long name! 0:0acdaf898367
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 tip 1:8a3ca90d111d
7 tip 1:8a3ca90d111d
8 first 0:0acdaf898367
8 first 0:0acdaf898367
9 8a3ca90d111d tip
9 8a3ca90d111d tip
10 M a
10 M a
11 8a3ca90d111d+ tip
11 8a3ca90d111d+ tip
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 0acdaf898367+ first
13 0acdaf898367+ first
14 0acdaf898367+ first
14 0acdaf898367+ first
15 M a
15 M a
16 8216907a933d tip
16 8216907a933d tip
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 (branch merge, don't forget to commit)
18 (branch merge, don't forget to commit)
19 8216907a933d+8a3ca90d111d+ tip
19 8216907a933d+8a3ca90d111d+ tip
20 M .hgtags
20 M .hgtags
21 tip 6:e2174d339386
21 tip 6:e2174d339386
22 first 0:0acdaf898367
22 first 0:0acdaf898367
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
27 localtags, line 1: tag 'invalid' refers to unknown node
27 localtags, line 1: tag 'invalid' refers to unknown node
28 tip 8:4ca6f1b1a68c
28 tip 8:4ca6f1b1a68c
29 first 0:0acdaf898367
29 first 0:0acdaf898367
30 changeset: 8:4ca6f1b1a68c
30 changeset: 8:4ca6f1b1a68c
31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 localtags, line 1: tag 'invalid' refers to unknown node
34 localtags, line 1: tag 'invalid' refers to unknown node
35 tag: tip
35 tag: tip
36 parent: 3:b2ef3841386b
36 parent: 3:b2ef3841386b
37 user: test
37 user: test
38 date: Mon Jan 12 13:46:40 1970 +0000
38 date: Mon Jan 12 13:46:40 1970 +0000
39 summary: head
39 summary: head
40
40
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 tip 4:36195b728445
43 tip 4:36195b728445
44 bar 1:b204a97e6e8d
44 bar 1:b204a97e6e8d
45 changeset: 5:57e1983b4a60
45 changeset: 5:57e1983b4a60
46 tag: tip
46 tag: tip
47 user: test
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: Removed tag bar
49 summary: Removed tag bar
50
50
51 tip 5:57e1983b4a60
51 tip 5:57e1983b4a60
52 tip 5:d8bb4d1eff25
52 tip 5:d8bb4d1eff25
53 bar 0:b409d9da318e
53 bar 0:b409d9da318e
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 tip 6:b5ff9d142648
55 tip 6:b5ff9d142648
56 bar 0:b409d9da318e
56 bar 0:b409d9da318e
57 abort: a tag named bar already exists (use -f to force)
57 abort: a tag named bar already exists (use -f to force)
58 tip 6:b5ff9d142648
58 tip 6:b5ff9d142648
59 bar 0:b409d9da318e
59 bar 0:b409d9da318e
60 adding foo
61 tip 3:ca8479b4351c
62 bar 2:72b852876a42
63 % bar should still point to rev 2
64 tip 4:40af5d225513
65 bar 2:72b852876a42
General Comments 0
You need to be logged in to leave comments. Login now