tags: do not merge bookmarks with tags...
David Soria Parra
r13385:d012d954 default
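This change removes the line tags.update(self._bookmarks) from localrepo._findtags() (marked with "-" in the diff below), so bookmarks are no longer folded into the tag mapping returned by repo.tags(). Bookmark names are still resolved by localrepo.lookup(), which checks self._bookmarks explicitly, and bookmarks pointing at a node can still be listed with nodebookmarks().

A minimal sketch of the behavioral difference, assuming a repository object repo with a bookmark named 'feature' (hypothetical names, for illustration only):

    node = repo.lookup('feature')   # still resolves, via repo._bookmarks
    'feature' in repo.tags()        # False after this change (True before)
    repo.nodebookmarks(node)        # ['feature']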
@@ -1,2013 +1,2012 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

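    # repo[None] is the working directory context; repo[changeid] is the
    # context of a committed changeset (by rev, node, tag, and so on)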
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
-        tags.update(self._bookmarks)
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

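    # bookmark analogue of nodetags above: list the bookmarks that
    # point at the given node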
    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

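    # the branchheads cache file (written by _writebranchcache below)
    # holds one "<hex tip node> <tip rev>" header line followed by one
    # "<hex node> <branch label>" line per branch head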
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

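    # name resolution order below: integer rev, the special names '.',
    # 'null' and 'tip', rev number or full node, bookmark, tag, branch
    # name, and finally an unambiguous node prefix; note that a bookmark
    # shadows a tag of the same name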
    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

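    # encode/decode filters are configured in the [encode] and [decode]
    # sections of hgrc; each entry maps a file pattern to a filter
    # command or to a registered data filter (see adddatafilter below)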
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

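        # once the transaction closes, aftertrans() renames the journal*
        # files to undo*, which is what rollback() below restores from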
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

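    # lock ordering note: callers that need both locks take wlock first
    # and then lock, as rollback() above does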
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

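            # status() yields (modified, added, removed, deleted, unknown,
            # ignored, clean); the tuple indices below rely on that order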
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
973 for s in sorted(subs):
972 for s in sorted(subs):
974 sub = wctx.sub(s)
973 sub = wctx.sub(s)
975 self.ui.status(_('committing subrepository %s\n') %
974 self.ui.status(_('committing subrepository %s\n') %
976 subrepo.subrelpath(sub))
975 subrepo.subrelpath(sub))
977 sr = sub.commit(cctx._text, user, date)
976 sr = sub.commit(cctx._text, user, date)
978 state[s] = (state[s][0], sr)
977 state[s] = (state[s][0], sr)
979 subrepo.writestate(self, state)
978 subrepo.writestate(self, state)
980
979
981 # Save commit message in case this transaction gets rolled back
980 # Save commit message in case this transaction gets rolled back
982 # (e.g. by a pretxncommit hook). Leave the content alone on
981 # (e.g. by a pretxncommit hook). Leave the content alone on
983 # the assumption that the user will use the same editor again.
982 # the assumption that the user will use the same editor again.
984 msgfile = self.opener('last-message.txt', 'wb')
983 msgfile = self.opener('last-message.txt', 'wb')
985 msgfile.write(cctx._text)
984 msgfile.write(cctx._text)
986 msgfile.close()
985 msgfile.close()
987
986
988 p1, p2 = self.dirstate.parents()
987 p1, p2 = self.dirstate.parents()
989 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
988 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
990 try:
989 try:
991 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
990 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
992 ret = self.commitctx(cctx, True)
991 ret = self.commitctx(cctx, True)
993 except:
992 except:
994 if edited:
993 if edited:
995 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
994 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
996 self.ui.write(
995 self.ui.write(
997 _('note: commit message saved in %s\n') % msgfn)
996 _('note: commit message saved in %s\n') % msgfn)
998 raise
997 raise
999
998
1000 # update bookmarks, dirstate and mergestate
999 # update bookmarks, dirstate and mergestate
1001 parents = (p1, p2)
1000 parents = (p1, p2)
1002 if p2 == nullid:
1001 if p2 == nullid:
1003 parents = (p1,)
1002 parents = (p1,)
1004 bookmarks.update(self, parents, ret)
1003 bookmarks.update(self, parents, ret)
1005 for f in changes[0] + changes[1]:
1004 for f in changes[0] + changes[1]:
1006 self.dirstate.normal(f)
1005 self.dirstate.normal(f)
1007 for f in changes[2]:
1006 for f in changes[2]:
1008 self.dirstate.forget(f)
1007 self.dirstate.forget(f)
1009 self.dirstate.setparents(ret)
1008 self.dirstate.setparents(ret)
1010 ms.reset()
1009 ms.reset()
1011 finally:
1010 finally:
1012 wlock.release()
1011 wlock.release()
1013
1012
1014 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1013 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1015 return ret
1014 return ret
1016
1015
1017 def commitctx(self, ctx, error=False):
1016 def commitctx(self, ctx, error=False):
1018 """Add a new revision to current repository.
1017 """Add a new revision to current repository.
1019 Revision information is passed via the context argument.
1018 Revision information is passed via the context argument.
1020 """
1019 """
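        # Illustrative note (not original code): commit() above builds a
        # context.workingctx and hands it to this method, e.g. roughly:
        #
        #   cctx = context.workingctx(repo, text, user, date, extra, changes)
        #   node = repo.commitctx(cctx, error=True)
        #
        # 'repo' is a hypothetical localrepository instance; the return
        # value is the new changelog node.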

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
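        # Illustrative sketch (hypothetical names; matcher construction may
        # differ): to walk working-directory files matching a pattern, a
        # caller might do something like:
        #
        #   m = matchmod.match(repo.root, '', ['glob:**.py'])
        #   for f in repo.walk(m):
        #       print f
        #
        # Passing node=None (the default) walks the working directory;
        # passing a changeset node walks that revision instead.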
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes, or between a node and
        the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """
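        # Illustrative note (not original code): the return value is the
        # seven-element tuple built near the end of this method, so callers
        # typically unpack it positionally, e.g.:
        #
        #   (modified, added, removed, deleted,
        #    unknown, ignored, clean) = repo.status(unknown=True)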

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
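        # Illustrative sketch (hypothetical branch name): e.g.
        #
        #   heads = repo.branchheads('default', closed=True)
        #
        # would list the 'default' branch heads, newest first, including
        # heads that are marked as closed.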
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
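
    # Illustrative note on between() (not original code): for each
    # (top, bottom) pair it walks first parents from top towards bottom and
    # records nodes at exponentially growing distances (1, 2, 4, 8, ...),
    # since f doubles each time a node is appended. A hedged sketch for a
    # hypothetical linear history n0..n10:
    #
    #   repo.between([(n10, n0)])
    #   # -> [[n9, n8, n6, n2]]  # nodes at distances 1, 2, 4, 8 from n10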

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        continue
                    if cr in cl.descendants():
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass
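
    # Illustrative sketch (not original code): an extension might wrap this
    # hook-point roughly as follows, assuming a hypothetical 'myext' config
    # section and a saved reference to the original method:
    #
    #   def checkpush(self, force, revs):
    #       orig_checkpush(self, force, revs)  # call the wrapped original
    #       if not force and self.ui.configbool('myext', 'blockpush'):
    #           raise util.Abort('pushing is disabled by myext')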

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
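        # Illustrative note (not original code): a hypothetical caller could
        # interpret the documented return values along these lines:
        #
        #   ret = repo.push(remote)
        #   if ret == 0:
        #       pass  # HTTP error, or there was nothing to push
        #   elif ret == 1:
        #       pass  # pushed with head count unchanged, or push refused
        #   # other values: see addchangegroup() below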
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
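        # Illustrative note (hypothetical values): per the docstring above,
        # extranodes is shaped roughly like:
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 means the manifest
        #   }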

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # of the first manifest that references it.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""
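
        # Illustrative note (not original code): this fast path is reached
        # from changegroupsubset() above when the requested heads equal all
        # of the repository's heads, e.g. (hypothetically):
        #
        #   cg = repo._changegroup(nodes, 'push')
        #   data = cg.read(4096)  # successive changegroup chunks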

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
                yield chnk
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            efiles = {}
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
                yield chnk
            efiles = len(efiles)
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            total=efiles, unit=_('files'))
                        yield chnk
            self.ui.progress(_('bundling'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
1756 def csmap(x):
1755 def csmap(x):
1757 self.ui.debug("add changeset %s\n" % short(x))
1756 self.ui.debug("add changeset %s\n" % short(x))
1758 return len(cl)
1757 return len(cl)
1759
1758
1760 def revmap(x):
1759 def revmap(x):
1761 return cl.rev(x)
1760 return cl.rev(x)
1762
1761
1763 if not source:
1762 if not source:
1764 return 0
1763 return 0
1765
1764
1766 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1765 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1767
1766
1768 changesets = files = revisions = 0
1767 changesets = files = revisions = 0
1769 efiles = set()
1768 efiles = set()
1770
1769
1771 # write changelog data to temp files so concurrent readers will not see
1770 # write changelog data to temp files so concurrent readers will not see
1772 # inconsistent view
1771 # inconsistent view
1773 cl = self.changelog
1772 cl = self.changelog
1774 cl.delayupdate()
1773 cl.delayupdate()
1775 oldheads = len(cl.heads())
1774 oldheads = len(cl.heads())
1776
1775
1777 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1776 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1778 try:
1777 try:
1779 trp = weakref.proxy(tr)
1778 trp = weakref.proxy(tr)
1780 # pull off the changeset group
1779 # pull off the changeset group
1781 self.ui.status(_("adding changesets\n"))
1780 self.ui.status(_("adding changesets\n"))
1782 clstart = len(cl)
1781 clstart = len(cl)
1783 class prog(object):
1782 class prog(object):
1784 step = _('changesets')
1783 step = _('changesets')
1785 count = 1
1784 count = 1
1786 ui = self.ui
1785 ui = self.ui
1787 total = None
1786 total = None
1788 def __call__(self):
1787 def __call__(self):
1789 self.ui.progress(self.step, self.count, unit=_('chunks'),
1788 self.ui.progress(self.step, self.count, unit=_('chunks'),
1790 total=self.total)
1789 total=self.total)
1791 self.count += 1
1790 self.count += 1
1792 pr = prog()
1791 pr = prog()
1793 source.callback = pr
1792 source.callback = pr
1794
1793
1795 if (cl.addgroup(source, csmap, trp) is None
1794 if (cl.addgroup(source, csmap, trp) is None
1796 and not emptyok):
1795 and not emptyok):
1797 raise util.Abort(_("received changelog group is empty"))
1796 raise util.Abort(_("received changelog group is empty"))
1798 clend = len(cl)
1797 clend = len(cl)
1799 changesets = clend - clstart
1798 changesets = clend - clstart
1800 for c in xrange(clstart, clend):
1799 for c in xrange(clstart, clend):
1801 efiles.update(self[c].files())
1800 efiles.update(self[c].files())
1802 efiles = len(efiles)
1801 efiles = len(efiles)
1803 self.ui.progress(_('changesets'), None)
1802 self.ui.progress(_('changesets'), None)
1804
1803
1805 # pull off the manifest group
1804 # pull off the manifest group
1806 self.ui.status(_("adding manifests\n"))
1805 self.ui.status(_("adding manifests\n"))
1807 pr.step = _('manifests')
1806 pr.step = _('manifests')
1808 pr.count = 1
1807 pr.count = 1
1809 pr.total = changesets # manifests <= changesets
1808 pr.total = changesets # manifests <= changesets
1810 # no need to check for empty manifest group here:
1809 # no need to check for empty manifest group here:
1811 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1810 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1812 # no new manifest will be created and the manifest group will
1811 # no new manifest will be created and the manifest group will
1813 # be empty during the pull
1812 # be empty during the pull
1814 self.manifest.addgroup(source, revmap, trp)
1813 self.manifest.addgroup(source, revmap, trp)
1815 self.ui.progress(_('manifests'), None)
1814 self.ui.progress(_('manifests'), None)
1816
1815
1817 needfiles = {}
1816 needfiles = {}
1818 if self.ui.configbool('server', 'validate', default=False):
1817 if self.ui.configbool('server', 'validate', default=False):
1819 # validate incoming csets have their manifests
1818 # validate incoming csets have their manifests
1820 for cset in xrange(clstart, clend):
1819 for cset in xrange(clstart, clend):
1821 mfest = self.changelog.read(self.changelog.node(cset))[0]
1820 mfest = self.changelog.read(self.changelog.node(cset))[0]
1822 mfest = self.manifest.readdelta(mfest)
1821 mfest = self.manifest.readdelta(mfest)
1823 # store file nodes we must see
1822 # store file nodes we must see
1824 for f, n in mfest.iteritems():
1823 for f, n in mfest.iteritems():
1825 needfiles.setdefault(f, set()).add(n)
1824 needfiles.setdefault(f, set()).add(n)
1826
1825
1827 # process the files
1826 # process the files
1828 self.ui.status(_("adding file changes\n"))
1827 self.ui.status(_("adding file changes\n"))
1829 pr.step = 'files'
1828 pr.step = 'files'
1830 pr.count = 1
1829 pr.count = 1
1831 pr.total = efiles
1830 pr.total = efiles
1832 source.callback = None
1831 source.callback = None
1833
1832
1834 while 1:
1833 while 1:
1835 f = source.chunk()
1834 f = source.chunk()
1836 if not f:
1835 if not f:
1837 break
1836 break
1838 self.ui.debug("adding %s revisions\n" % f)
1837 self.ui.debug("adding %s revisions\n" % f)
1839 pr()
1838 pr()
1840 fl = self.file(f)
1839 fl = self.file(f)
1841 o = len(fl)
1840 o = len(fl)
1842 if fl.addgroup(source, revmap, trp) is None:
1841 if fl.addgroup(source, revmap, trp) is None:
1843 raise util.Abort(_("received file revlog group is empty"))
1842 raise util.Abort(_("received file revlog group is empty"))
1844 revisions += len(fl) - o
1843 revisions += len(fl) - o
1845 files += 1
1844 files += 1
1846 if f in needfiles:
1845 if f in needfiles:
1847 needs = needfiles[f]
1846 needs = needfiles[f]
1848 for new in xrange(o, len(fl)):
1847 for new in xrange(o, len(fl)):
1849 n = fl.node(new)
1848 n = fl.node(new)
1850 if n in needs:
1849 if n in needs:
1851 needs.remove(n)
1850 needs.remove(n)
1852 if not needs:
1851 if not needs:
1853 del needfiles[f]
1852 del needfiles[f]
1854 self.ui.progress(_('files'), None)
1853 self.ui.progress(_('files'), None)
1855
1854
1856 for f, needs in needfiles.iteritems():
1855 for f, needs in needfiles.iteritems():
1857 fl = self.file(f)
1856 fl = self.file(f)
1858 for n in needs:
1857 for n in needs:
1859 try:
1858 try:
1860 fl.rev(n)
1859 fl.rev(n)
1861 except error.LookupError:
1860 except error.LookupError:
1862 raise util.Abort(
1861 raise util.Abort(
1863 _('missing file data for %s:%s - run hg verify') %
1862 _('missing file data for %s:%s - run hg verify') %
1864 (f, hex(n)))
1863 (f, hex(n)))
1865
1864
1866 newheads = len(cl.heads())
1865 newheads = len(cl.heads())
1867 heads = ""
1866 heads = ""
1868 if oldheads and newheads != oldheads:
1867 if oldheads and newheads != oldheads:
1869 heads = _(" (%+d heads)") % (newheads - oldheads)
1868 heads = _(" (%+d heads)") % (newheads - oldheads)
1870
1869
1871 self.ui.status(_("added %d changesets"
1870 self.ui.status(_("added %d changesets"
1872 " with %d changes to %d files%s\n")
1871 " with %d changes to %d files%s\n")
1873 % (changesets, revisions, files, heads))
1872 % (changesets, revisions, files, heads))
1874
1873
1875 if changesets > 0:
1874 if changesets > 0:
1876 p = lambda: cl.writepending() and self.root or ""
1875 p = lambda: cl.writepending() and self.root or ""
1877 self.hook('pretxnchangegroup', throw=True,
1876 self.hook('pretxnchangegroup', throw=True,
1878 node=hex(cl.node(clstart)), source=srctype,
1877 node=hex(cl.node(clstart)), source=srctype,
1879 url=url, pending=p)
1878 url=url, pending=p)
1880
1879
1881 # make changelog see real files again
1880 # make changelog see real files again
1882 cl.finalize(trp)
1881 cl.finalize(trp)
1883
1882
1884 tr.close()
1883 tr.close()
1885 finally:
1884 finally:
1886 tr.release()
1885 tr.release()
1887 if lock:
1886 if lock:
1888 lock.release()
1887 lock.release()
1889
1888
1890 if changesets > 0:
1889 if changesets > 0:
1891 # forcefully update the on-disk branch cache
1890 # forcefully update the on-disk branch cache
1892 self.ui.debug("updating the branch cache\n")
1891 self.ui.debug("updating the branch cache\n")
1893 self.updatebranchcache()
1892 self.updatebranchcache()
1894 self.hook("changegroup", node=hex(cl.node(clstart)),
1893 self.hook("changegroup", node=hex(cl.node(clstart)),
1895 source=srctype, url=url)
1894 source=srctype, url=url)
1896
1895
1897 for i in xrange(clstart, clend):
1896 for i in xrange(clstart, clend):
1898 self.hook("incoming", node=hex(cl.node(i)),
1897 self.hook("incoming", node=hex(cl.node(i)),
1899 source=srctype, url=url)
1898 source=srctype, url=url)
1900
1899
1901 # FIXME - why does this care about tip?
1900 # FIXME - why does this care about tip?
1902 if newheads == oldheads:
1901 if newheads == oldheads:
1903 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1902 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1904
1903
1905 # never return 0 here:
1904 # never return 0 here:
1906 if newheads < oldheads:
1905 if newheads < oldheads:
1907 return newheads - oldheads - 1
1906 return newheads - oldheads - 1
1908 else:
1907 else:
1909 return newheads - oldheads + 1
1908 return newheads - oldheads + 1
1910
1909
1911
1910
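The return convention documented in the addchangegroup() docstring packs two
facts into one integer: whether anything was added, and how the head count
moved. A hedged sketch of how a caller might decode it (the helper name is
made up for illustration):

    def describe_addchangegroup_result(ret):
        # mirrors the docstring: 0 = no changes; 1 = changes added with
        # the head count unchanged; 2..n = ret - 1 new heads;
        # -2..-n = -ret - 1 heads removed
        if ret == 0:
            return 'no changes'
        if ret == 1:
            return 'changes added, head count unchanged'
        if ret > 1:
            return 'changes added, %d new head(s)' % (ret - 1)
        return 'changes added, %d head(s) removed' % (-ret - 1)
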
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

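stream_in() consumes a simple line-oriented preamble: an integer status line
(0 ok, 1 operation forbidden, 2 remote lock failed), a line carrying the file
and byte totals, then one "name\0size" header per file followed by exactly
size bytes of data. A sketch of that layout in text mode (the real protocol
is bytes; these helper names are illustrative, not Mercurial API):

    def parse_stream_preamble(fp):
        # line 1: integer status; line 2: "<total_files> <total_bytes>"
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('server refused stream (code %d)' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes

    def iter_stream_entries(fp, total_files):
        # each entry is "<name>\0<size>\n" followed by exactly <size>
        # bytes of file data, which is why names cannot contain
        # '\n' or '\r'
        for _ in range(total_files):
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            yield name, size, fp.read(size)

    import io
    fp = io.StringIO('0\n1 5\ndata/a.i\x005\nhello')
    assert parse_stream_preamble(fp) == (1, 5)
    assert list(iter_stream_entries(fp, 1)) == [('data/a.i', 5, 'hello')]
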
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

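clone()'s streaming decision reduces to a capability check: stream blindly if
the server advertises 'stream' (plain revlogv1), otherwise stream only if
every format listed in 'streamreqs' is one we support; anything else falls
back to pull. The same logic as a standalone sketch (the dict-based
capability lookup is illustrative, not the wire protocol):

    def can_stream_clone(remote_caps, supportedformats):
        if remote_caps.get('stream'):
            return True  # remote is plain revlogv1, always readable
        streamreqs = remote_caps.get('streamreqs')
        if streamreqs:
            # safe only if the remote requires no format we lack
            return not set(streamreqs.split(',')) - supportedformats
        return False

    assert can_stream_clone({'stream': True}, set(['revlogv1']))
    assert can_stream_clone({'streamreqs': 'revlogv1'},
                            set(['revlogv1', 'parentdelta']))
    assert not can_stream_clone({'streamreqs': 'revlogv2'},
                                set(['revlogv1']))
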
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True

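As its comment says, aftertrans() exists to break a reference cycle: it
copies the (src, dest) rename list instead of holding the transaction, so the
callback can run after the transaction object is gone (the transaction is
likewise handed around inside addchangegroup() only as a weakref proxy). The
closure pattern on its own, as a runnable sketch separate from localrepo.py:

    import os, tempfile

    def make_renamer(files):
        # copy the (src, dest) pairs now; the returned callback keeps
        # no reference to the object that produced them
        renamefiles = [tuple(t) for t in files]
        def a():
            for src, dest in renamefiles:
                os.rename(src, dest)
        return a

    d = tempfile.mkdtemp()
    src, dest = os.path.join(d, 'journal'), os.path.join(d, 'undo')
    open(src, 'w').close()
    make_renamer([(src, dest)])()  # rename runs, no transaction alive
    assert os.path.exists(dest)
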
@@ -1,68 +1,66 @@
  $ echo "[extensions]" >> $HGRCPATH
  $ echo "rebase=" >> $HGRCPATH
  $ echo "bookmarks=" >> $HGRCPATH

initialize repository

  $ hg init

  $ echo 'a' > a
  $ hg ci -A -m "0"
  adding a

  $ echo 'b' > b
  $ hg ci -A -m "1"
  adding b

  $ hg up 0
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo 'c' > c
  $ hg ci -A -m "2"
  adding c
  created new head

  $ echo 'd' > d
  $ hg ci -A -m "3"
  adding d

  $ hg bookmark -r 1 one
  $ hg bookmark -r 3 two

bookmark list

  $ hg bookmark
  * two 3:2ae46b1d99a7
  one 1:925d80f479bb

rebase

  $ hg rebase -s two -d one
  saved backup bundle to $TESTTMP/.hg/strip-backup/*-backup.hg (glob)

  $ hg log
  changeset: 3:9163974d1cb5
-  tag: one
  tag: tip
-  tag: two
  parent: 1:925d80f479bb
  parent: 2:db815d6d32e6
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 3

  changeset: 2:db815d6d32e6
  parent: 0:f7b1eb17ad24
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 2

  changeset: 1:925d80f479bb
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 1

  changeset: 0:f7b1eb17ad24
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 0

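The two removed "tag:" lines above are the visible effect of this changeset:
the old output folded bookmarks into the tags dictionary, so hg log printed
"one" and "two" as tags of the rebased head. A toy illustration of the
separation the new expected output encodes (plain dicts, nothing from
Mercurial's internals):

    tags = {'tip': 'node3'}
    bookmarks = {'one': 'node3', 'two': 'node3'}

    def tags_for(node):
        # after the change, only real tags produce "tag:" lines
        return [t for t, n in sorted(tags.items()) if n == node]

    def bookmarks_for(node):
        # bookmarks live in their own namespace, reported separately
        return [b for b, n in sorted(bookmarks.items()) if n == node]

    assert tags_for('node3') == ['tip']
    assert bookmarks_for('node3') == ['one', 'two']
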
@@ -1,213 +1,205 @@
  $ echo "[extensions]" >> $HGRCPATH
  $ echo "bookmarks=" >> $HGRCPATH

  $ hg init

no bookmarks

  $ hg bookmarks
  no bookmarks set

bookmark rev -1

  $ hg bookmark X

list bookmarks

  $ hg bookmarks
  * X -1:000000000000

list bookmarks with color

  $ hg --config extensions.color= --config color.mode=ansi \
  > bookmarks --color=always
  \x1b[0;32m * X -1:000000000000\x1b[0m (esc)

  $ echo a > a
  $ hg add a
  $ hg commit -m 0

bookmark X moved to rev 0

  $ hg bookmarks
  * X 0:f7b1eb17ad24

look up bookmark

  $ hg log -r X
  changeset: 0:f7b1eb17ad24
-  tag: X
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 0


second bookmark for rev 0

  $ hg bookmark X2

bookmark rev -1 again

  $ hg bookmark -r null Y

list bookmarks

  $ hg bookmarks
  * X2 0:f7b1eb17ad24
  * X 0:f7b1eb17ad24
  Y -1:000000000000

  $ echo b > b
  $ hg add b
  $ hg commit -m 1

bookmarks revset

  $ hg log -r 'bookmark()'
  changeset: 1:925d80f479bb
-  tag: X
-  tag: X2
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 1

  $ hg log -r 'bookmark(Y)'
  $ hg log -r 'bookmark(X2)'
  changeset: 1:925d80f479bb
-  tag: X
-  tag: X2
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 1

  $ hg help revsets | grep 'bookmark('
  "bookmark([name])"

bookmarks X and X2 moved to rev 1, Y at rev -1

  $ hg bookmarks
  * X2 1:925d80f479bb
  * X 1:925d80f479bb
  Y -1:000000000000

bookmark rev 0 again

  $ hg bookmark -r 0 Z

  $ echo c > c
  $ hg add c
  $ hg commit -m 2

bookmarks X and X2 moved to rev 2, Y at rev -1, Z at rev 0

  $ hg bookmarks
  * X2 2:0316ce92851d
  * X 2:0316ce92851d
  Z 0:f7b1eb17ad24
  Y -1:000000000000

rename nonexistent bookmark

  $ hg bookmark -m A B
  abort: a bookmark of this name does not exist
  [255]

rename to existent bookmark

  $ hg bookmark -m X Y
  abort: a bookmark of the same name already exists
  [255]

force rename to existent bookmark

  $ hg bookmark -f -m X Y

list bookmarks

  $ hg bookmark
  * X2 2:0316ce92851d
  * Y 2:0316ce92851d
  Z 0:f7b1eb17ad24

rename without new name

  $ hg bookmark -m Y
  abort: new bookmark name required
  [255]

delete without name

  $ hg bookmark -d
  abort: bookmark name required
  [255]

delete nonexistent bookmark

  $ hg bookmark -d A
  abort: a bookmark of this name does not exist
  [255]

bookmark name with spaces should be stripped

  $ hg bookmark ' x y '

list bookmarks

  $ hg bookmarks
  * X2 2:0316ce92851d
  * Y 2:0316ce92851d
  Z 0:f7b1eb17ad24
  * x y 2:0316ce92851d

look up stripped bookmark name

  $ hg log -r '"x y"'
  changeset: 2:0316ce92851d
-  tag: X2
-  tag: Y
  tag: tip
-  tag: x y
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: 2


reject bookmark name with newline

  $ hg bookmark '
  > '
  abort: bookmark name cannot contain newlines
  [255]

bookmark with existing name

  $ hg bookmark Z
  abort: a bookmark of the same name already exists
  [255]

force bookmark with existing name

  $ hg bookmark -f Z

list bookmarks

  $ hg bookmark
  * X2 2:0316ce92851d
  * Y 2:0316ce92851d
  * Z 2:0316ce92851d
  * x y 2:0316ce92851d

revision but no bookmark name

  $ hg bookmark -r .
  abort: bookmark name required
  [255]

bookmark name with whitespace only

  $ hg bookmark ' '
  abort: bookmark names cannot consist entirely of whitespace
  [255]
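
Taken together, the abort messages in this test pin down the bookmark naming
rules: surrounding whitespace is stripped, newlines are rejected, a name
cannot be all whitespace, and reusing an existing name requires -f/--force. A
compact sketch of those rules (illustrative only; the real checks live in
Mercurial's bookmarks code, not in a helper like this):

    def validate_bookmark_name(name, existing, force=False):
        name = name.strip()  # ' x y ' becomes 'x y'
        if '\n' in name:
            raise ValueError('bookmark name cannot contain newlines')
        if not name:
            raise ValueError(
                'bookmark names cannot consist entirely of whitespace')
        if name in existing and not force:
            raise ValueError('a bookmark of the same name already exists')
        return name

    assert validate_bookmark_name(' x y ', set()) == 'x y'
    assert validate_bookmark_name('Z', set(['Z']), force=True) == 'Z'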