localrepo: make requirements attribute of newly-created repos contain a set...
Andrew Pritchard
r14905:207935cd default
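
The patched hunk below is a single added line. On the create path, requirements is accumulated as a list of format names, while opening an existing repository gets a set back from scmutil.readrequires() (note the `requirements = set()` fallback on the read path), so repo.requirements ended up list-typed or set-typed depending on how the repository was opened. Converting the list before it reaches _applyrequirements() gives both paths the same type. A minimal, standalone sketch of that normalization, assuming a configbool callable such as ui.configbool (the helper name is illustrative, not Mercurial API):

    def newrepo_requirements(configbool):
        # Accumulate requirements in order, as the create branch does...
        requirements = ["revlogv1"]
        if configbool('format', 'usestore', True):
            requirements.append("store")
        if configbool('format', 'generaldelta', False):
            requirements.append("generaldelta")
        # ...then normalize to a set, matching what readrequires() returns
        # for existing repositories.
        return set(requirements)
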
@@ -1,2002 +1,2003 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey
 import changelog, dirstate, filelog, manifest, context, bookmarks
 import lock, transaction, store, encoding
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))

     def __init__(self, baseui, path=None, create=False):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 util.makedir(self.path, notindexed=True)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.opener.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
+                requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.opener, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath"))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()

         # These two define the set of tags for this repository. _tags
         # maps tag name to node; _tagtypes maps tag name to 'global' or
         # 'local'. (Global tags are defined by .hgtags across all
         # heads, and local tags are defined in .hg/localtags.) They
         # constitute the in-memory cache of tags.
         self._tags = None
         self._tagtypes = None

         self._branchcache = None
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

     def _applyrequirements(self, requirements):
         self.requirements = requirements
         openerreqs = set(('revlogv1', 'generaldelta'))
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in openerreqs)

     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = os.sep.join(parts)
             if prefix in ctx.substate:
                 if prefix == subpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     @util.propertycache
     def _bookmarks(self):
         return bookmarks.read(self)

     @util.propertycache
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)

     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c

     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)

     @propertycache
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid

         return dirstate.dirstate(self.opener, self.ui, self.root, validate)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         for i in xrange(len(self)):
             yield i

     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''

         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         for r in m(self, range(len(self))):
             yield self[r]

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagtypes and name in self._tagtypes:
                     old = self._tags.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     def tags(self):
         '''return a mapping of tag to node'''
         if self._tags is None:
             (self._tags, self._tagtypes) = self._findtags()

         return self._tags

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 try:
                     # ignore tags to unknown nodes
                     self.changelog.lookup(node)
                     tags[encoding.tolocal(name)] = node
                 except error.LookupError:
                     pass
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         self.tags()

         return self._tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             r = self.changelog.rev(n)
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
             for tags in self.nodetagscache.itervalues():
                 tags.sort()
         return self.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return self._branchcache

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache

         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._branchcache = partial

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         self.updatebranchcache()
         return self._branchcache

     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             tip = heads[-1]
             for h in reversed(heads):
                 if 'close' not in self.changelog.read(h)[5]:
                     tip = h
                     break
             bt[bn] = tip
         return bt

     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("cache/branchheads")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev

         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l:
                     continue
                 node, label = l.split(" ", 1)
                 label = encoding.tolocal(label.strip())
                 partial.setdefault(label, []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev

     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("cache/branchheads", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
             f.rename()
         except (IOError, OSError):
             pass

     def _updatebranchcache(self, partial, ctxgen):
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
             if len(bheads) <= 1:
                 continue
             bheads = sorted(bheads, key=lambda x: self[x].rev())
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
                 minbhrev = self[bheads[0]].node()
                 reachable = self.changelog.reachable(latest, minbhrev)
                 reachable.remove(latest)
                 if reachable:
                     bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads

     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.p1()
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self._bookmarks:
             return self._bookmarks[key]
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n

         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except TypeError:
             pass
         raise error.RepoLookupError(_("unknown revision '%s'") % key)

     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key

         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()

     def known(self, nodes):
         nm = self.changelog.nodemap
         return [(n in nm) for n in nodes]

     def local(self):
         return self

     def join(self, f):
         return os.path.join(self.path, f)

     def wjoin(self, f):
         return os.path.join(self.root, f)

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)

     def _link(self, f):
         return os.path.islink(self.wjoin(f))

     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]

     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     @propertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')

     @propertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener.read(filename)
         return self._filter(self._encodefilterpats, filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener.write(filename, data)
             if 'x' in flags:
                 util.setflags(self.wjoin(filename), False, True)

     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)

     def transaction(self, desc):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()

         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))

         journalfiles = self._writejournal(desc)
         renames = [(x, undoname(x)) for x in journalfiles]

         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr

     def _writejournal(self, desc):
         # save dirstate for rollback
         try:
             ds = self.opener.read("dirstate")
         except IOError:
             ds = ""
         self.opener.write("journal.dirstate", ds)
         self.opener.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.opener.write("journal.desc",
                           "%d\n%s\n" % (len(self), desc))

         bkname = self.join('bookmarks')
         if os.path.exists(bkname):
             util.copyfile(bkname, self.join('journal.bookmarks'))
         else:
             self.opener.write('journal.bookmarks', '')

         return (self.sjoin('journal'), self.join('journal.dirstate'),
                 self.join('journal.branch'), self.join('journal.desc'),
                 self.join('journal.bookmarks'))

     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"),
                                      self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()

     def rollback(self, dryrun=False):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 try:
                     args = self.opener.read("undo.desc").splitlines()
                     if len(args) >= 3 and self.ui.verbose:
                         desc = _("repository tip rolled back to revision %s"
                                  " (undo %s: %s)\n") % (
                                  int(args[0]) - 1, args[1], args[2])
                     elif len(args) >= 2:
                         desc = _("repository tip rolled back to revision %s"
                                  " (undo %s)\n") % (
                                  int(args[0]) - 1, args[1])
                 except IOError:
                     desc = _("rolling back unknown transaction\n")
                 self.ui.status(desc)
                 if dryrun:
                     return
                 transaction.rollback(self.sopener, self.sjoin("undo"),
                                      self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 if os.path.exists(self.join('undo.bookmarks')):
                     util.rename(self.join('undo.bookmarks'),
                                 self.join('bookmarks'))
                 try:
                     branch = self.opener.read("undo.branch")
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("named branch could not be reset, "
                                    "current branch is still: %s\n")
                                  % self.dirstate.branch())
                 self.invalidate()
                 self.dirstate.invalidate()
                 self.destroyed()
                 parents = tuple([p.rev() for p in self.parents()])
                 if len(parents) > 1:
                     self.ui.status(_("working directory now based on "
                                      "revisions %d and %d\n") % parents)
                 else:
                     self.ui.status(_("working directory now based on "
                                      "revision %d\n") % parents)
             else:
                 self.ui.warn(_("no rollback information available\n"))
                 return 1
         finally:
             release(lock, wlock)

     def invalidatecaches(self):
         self._tags = None
         self._tagtypes = None
         self.nodetagscache = None
         self._branchcache = None # in UTF-8
         self._branchcachetip = None

     def invalidate(self):
         for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
             if a in self.__dict__:
                 delattr(self, a)
         self.invalidatecaches()

     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l

     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
         to the lock. Use this before modifying the store (e.g. committing or
         stripping). If you are opening a transaction, get a lock as well.)'''
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.sjoin("lock"), wait, self.store.write,
                        self.invalidate, _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l

     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
         .hg except .hg/store) and return a weak reference to the lock.
         Use this before modifying files in .hg.'''
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l

843 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
844 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
844 """
845 """
845 commit an individual file as part of a larger transaction
846 commit an individual file as part of a larger transaction
846 """
847 """
847
848
848 fname = fctx.path()
849 fname = fctx.path()
849 text = fctx.data()
850 text = fctx.data()
850 flog = self.file(fname)
851 flog = self.file(fname)
851 fparent1 = manifest1.get(fname, nullid)
852 fparent1 = manifest1.get(fname, nullid)
852 fparent2 = fparent2o = manifest2.get(fname, nullid)
853 fparent2 = fparent2o = manifest2.get(fname, nullid)
853
854
854 meta = {}
855 meta = {}
855 copy = fctx.renamed()
856 copy = fctx.renamed()
856 if copy and copy[0] != fname:
857 if copy and copy[0] != fname:
857 # Mark the new revision of this file as a copy of another
858 # Mark the new revision of this file as a copy of another
858 # file. This copy data will effectively act as a parent
859 # file. This copy data will effectively act as a parent
859 # of this new revision. If this is a merge, the first
860 # of this new revision. If this is a merge, the first
860 # parent will be the nullid (meaning "look up the copy data")
861 # parent will be the nullid (meaning "look up the copy data")
861 # and the second one will be the other parent. For example:
862 # and the second one will be the other parent. For example:
862 #
863 #
863 # 0 --- 1 --- 3 rev1 changes file foo
864 # 0 --- 1 --- 3 rev1 changes file foo
864 # \ / rev2 renames foo to bar and changes it
865 # \ / rev2 renames foo to bar and changes it
865 # \- 2 -/ rev3 should have bar with all changes and
866 # \- 2 -/ rev3 should have bar with all changes and
866 # should record that bar descends from
867 # should record that bar descends from
867 # bar in rev2 and foo in rev1
868 # bar in rev2 and foo in rev1
868 #
869 #
869 # this allows this merge to succeed:
870 # this allows this merge to succeed:
870 #
871 #
871 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
872 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
872 # \ / merging rev3 and rev4 should use bar@rev2
873 # \ / merging rev3 and rev4 should use bar@rev2
873 # \- 2 --- 4 as the merge base
874 # \- 2 --- 4 as the merge base
874 #
875 #
875
876
            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
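        # Hypothetical usage sketch (added note, not from upstream;
        # assumes an existing localrepository object 'repo'):
        #
        #   node = repo.commit(text='fix typo', user='alice')
        #   if node is None:
        #       repo.ui.status('nothing changed\n')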

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
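            # status() returns a 7-tuple of lists (modified, added,
            # removed, deleted, unknown, ignored, clean); the index
            # accesses below rely on that ordering.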
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """
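        # (Added note, not from upstream: commit() above is the usual
        # caller, handing in a workingctx. Any context object providing
        # files(), modified(), added(), removed(), p1(), p2(), user(),
        # date(), description() and extra() is handled the same way.)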

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """
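        # The result is a 7-tuple of sorted lists:
        #   (modified, added, removed, deleted, unknown, ignored, clean)
        # assembled in 'r' near the end of this method.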

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
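        # e.g. branchheads('default') returns the heads of the 'default'
        # branch, newest first, with closed heads filtered out
        # (illustrative note, not from upstream).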
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
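        # Added note, not from upstream: for each (top, bottom) pair this
        # walks first parents from top towards bottom, sampling nodes at
        # exponentially growing distances (1, 2, 4, 8, ...); the legacy
        # discovery protocol uses these samples to narrow down where two
        # repositories diverge.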
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

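                # Preference order implemented by the branches below:
                # getbundle, then a full changegroup, then
                # changegroupsubset for partial pulls.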
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
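        # Hypothetical usage sketch (added note, not from upstream; how
        # 'remote' is obtained is outside this file):
        #
        #   ret = repo.push(remote, revs=[somenode])
        #   if ret == 0:
        #       repo.ui.warn('push failed or nothing to push\n')
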
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
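        # e.g. getbundle('pull', heads=None, common=None) bundles every
        # local changeset; a non-empty 'common' subtracts the ancestors
        # of those nodes (illustrative note, not from upstream).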
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]
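        # (a node whose linkrev falls in commonrevs was introduced by a
        # changeset the recipient already has, so prune() drops it from
        # the outgoing group)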

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
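        # ('auto' becomes None, which defers the ordering decision to the
        # revlog's group(); any other value is parsed as a boolean and
        # forced)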

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
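        # Worked example of the return convention (added note): going from
        # 1 head to 3 yields dh = 2 and a return value of dh + 1 = 3;
        # going from 3 heads to 1 yields dh = -2 and dh - 1 = -3; 0 is
        # reserved for "nothing changed or no source".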
1712 def csmap(x):
1713 def csmap(x):
1713 self.ui.debug("add changeset %s\n" % short(x))
1714 self.ui.debug("add changeset %s\n" % short(x))
1714 return len(cl)
1715 return len(cl)
1715
1716
1716 def revmap(x):
1717 def revmap(x):
1717 return cl.rev(x)
1718 return cl.rev(x)
1718
1719
1719 if not source:
1720 if not source:
1720 return 0
1721 return 0
1721
1722
1722 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1723 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1723
1724
1724 changesets = files = revisions = 0
1725 changesets = files = revisions = 0
1725 efiles = set()
1726 efiles = set()
1726
1727
1727 # write changelog data to temp files so concurrent readers will not see
1728 # write changelog data to temp files so concurrent readers will not see
1728 # inconsistent view
1729 # inconsistent view
1729 cl = self.changelog
1730 cl = self.changelog
1730 cl.delayupdate()
1731 cl.delayupdate()
1731 oldheads = cl.heads()
1732 oldheads = cl.heads()
1732
1733
1733 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1734 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1734 try:
1735 try:
1735 trp = weakref.proxy(tr)
1736 trp = weakref.proxy(tr)
1736 # pull off the changeset group
1737 # pull off the changeset group
1737 self.ui.status(_("adding changesets\n"))
1738 self.ui.status(_("adding changesets\n"))
1738 clstart = len(cl)
1739 clstart = len(cl)
1739 class prog(object):
1740 class prog(object):
1740 step = _('changesets')
1741 step = _('changesets')
1741 count = 1
1742 count = 1
1742 ui = self.ui
1743 ui = self.ui
1743 total = None
1744 total = None
1744 def __call__(self):
1745 def __call__(self):
1745 self.ui.progress(self.step, self.count, unit=_('chunks'),
1746 self.ui.progress(self.step, self.count, unit=_('chunks'),
1746 total=self.total)
1747 total=self.total)
1747 self.count += 1
1748 self.count += 1
1748 pr = prog()
1749 pr = prog()
1749 source.callback = pr
1750 source.callback = pr
1750
1751
1751 source.changelogheader()
1752 source.changelogheader()
1752 if (cl.addgroup(source, csmap, trp) is None
1753 if (cl.addgroup(source, csmap, trp) is None
1753 and not emptyok):
1754 and not emptyok):
1754 raise util.Abort(_("received changelog group is empty"))
1755 raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                        if not needs:
                            del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

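The "never return 0 here" convention above deserves a gloss: the return value is the change in head count shifted one step away from zero, so a caller can treat 0 as "no changesets added" without ever confusing it with a successful pull. A minimal decoding sketch (the helper name is ours, not part of Mercurial):

    # Hypothetical inverse of addchangegroup()'s encoding: dh + 1 is
    # returned for dh >= 0 and dh - 1 for dh < 0, so zero never escapes.
    def decode_addchangegroup_result(ret):
        if ret > 0:
            return ret - 1   # heads gained (0 means head count unchanged)
        return ret + 1       # heads lost

    assert decode_addchangegroup_result(1) == 0    # success, same head count
    assert decode_addchangegroup_result(3) == 2    # two new heads
    assert decode_addchangegroup_result(-2) == -1  # one head fewer
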
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

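The requirements bookkeeping at the end of stream_in is plain set arithmetic: adopt the format requirements advertised by the streaming server, and keep any local requirements (such as 'shared') that are not format-related. A standalone sketch of that merge, with the function name and sample data invented for illustration:

    # Mirrors requirements.update(set(self.requirements) - self.supportedformats)
    SUPPORTEDFORMATS = set(['revlogv1', 'generaldelta'])

    def merge_requirements(local_reqs, remote_format_reqs):
        reqs = set(remote_format_reqs)
        reqs.update(set(local_reqs) - SUPPORTEDFORMATS)
        return reqs

    print(sorted(merge_requirements(['revlogv1', 'shared'],
                                    ['revlogv1', 'generaldelta'])))
    # -> ['generaldelta', 'revlogv1', 'shared']
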
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

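clone()'s transport choice is a short negotiation: streaming is attempted only for full clones, and only when every format requirement the remote advertises is one this client understands; anything else falls back to a regular pull. A toy model of the decision (the function name and the capability mapping are ours, not Mercurial API):

    # Toy model of the stream-vs-pull decision in clone() above.
    def choose_transport(stream, heads, caps, supportedformats):
        if stream and not heads:
            if 'stream' in caps:                 # bare 'stream' implies revlogv1
                return 'stream', set(['revlogv1'])
            streamreqs = caps.get('streamreqs', '')
            if streamreqs:
                reqs = set(streamreqs.split(','))
                if not reqs - supportedformats:  # nothing unsupported remains
                    return 'stream', reqs
        return 'pull', None

    print(choose_transport(True, [], {'streamreqs': 'revlogv1,generaldelta'},
                           set(['revlogv1', 'generaldelta'])))
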
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
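A note on the module-level helpers: aftertrans() deliberately copies the rename list into a closure that holds no reference to the repository, so the transaction's close callback cannot create a cycle that keeps destructors from running. A quick standalone illustration of the same pattern (paths invented; os.rename stands in for util.rename):

    import os, tempfile

    # Rebuild the kind of closure aftertrans() returns, then invoke it.
    d = tempfile.mkdtemp()
    journal = os.path.join(d, 'journal')
    open(journal, 'w').close()
    renamefiles = [(journal, os.path.join(d, 'undo'))]

    def a():
        for src, dest in renamefiles:
            os.rename(src, dest)

    a()
    print(os.listdir(d))  # ['undo']

The hunk below is from a test file updated by this same changeset: with requirements now held in a set, new repositories write their .hg/requires entries in a different order, so the expected output shifts accordingly.
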
@@ -1,194 +1,194 b''
This test tries to exercise the ssh functionality with a dummy script

  $ checknewrepo()
  > {
  > name=$1
  > if [ -d "$name"/.hg/store ]; then
  > echo store created
  > fi
  > if [ -f "$name"/.hg/00changelog.i ]; then
  > echo 00changelog.i created
  > fi
  > cat "$name"/.hg/requires
  > }

creating 'local'

  $ hg init local
  $ checknewrepo local
  store created
  00changelog.i created
  revlogv1
- fncache
- store
+ store
+ fncache
  dotencode
  $ echo this > local/foo
  $ hg ci --cwd local -A -m "init"
  adding foo

creating repo with format.usestore=false

  $ hg --config format.usestore=false init old
  $ checknewrepo old
  revlogv1

creating repo with format.usefncache=false

  $ hg --config format.usefncache=false init old2
  $ checknewrepo old2
  store created
  00changelog.i created
  revlogv1
  store

creating repo with format.dotencode=false

  $ hg --config format.dotencode=false init old3
  $ checknewrepo old3
  store created
  00changelog.i created
  revlogv1
- fncache
- store
+ store
+ fncache

test failure

  $ hg init local
  abort: repository local already exists!
  [255]

init+push to remote2

  $ hg init -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote2
  $ hg incoming -R remote2 local
  comparing with local
  changeset:   0:08b9e9f63b32
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     init


  $ hg push -R local -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote2
  pushing to ssh://user@dummy/remote2
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files

clone to remote1

  $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote1
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files

init to existing repo

  $ hg init -e "python $TESTDIR/dummyssh" ssh://user@dummy/remote1
  abort: repository remote1 already exists!
  abort: could not create remote repo!
  [255]

clone to existing repo

  $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote1
  abort: repository remote1 already exists!
  abort: could not create remote repo!
  [255]

output of dummyssh

  $ cat dummylog
  Got arguments 1:user@dummy 2:hg init remote2
  Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
  Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
  Got arguments 1:user@dummy 2:hg init remote1
  Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio
  Got arguments 1:user@dummy 2:hg init remote1
  Got arguments 1:user@dummy 2:hg init remote1

comparing repositories

  $ hg tip -q -R local
  0:08b9e9f63b32
  $ hg tip -q -R remote1
  0:08b9e9f63b32
  $ hg tip -q -R remote2
  0:08b9e9f63b32

check names for repositories (clashes with URL schemes, special chars)

  $ for i in bundle file hg http https old-http ssh static-http " " "with space"; do
  > printf "hg init \"$i\"... "
  > hg init "$i"
  > test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
  > done
  hg init "bundle"... ok
  hg init "file"... ok
  hg init "hg"... ok
  hg init "http"... ok
  hg init "https"... ok
  hg init "old-http"... ok
  hg init "ssh"... ok
  hg init "static-http"... ok
  hg init " "... ok
  hg init "with space"... ok

creating 'local/sub/repo'

  $ hg init local/sub/repo
  $ checknewrepo local/sub/repo
  store created
  00changelog.i created
  revlogv1
- fncache
- store
+ store
+ fncache
  dotencode

prepare test of init of url configured from paths

  $ echo '[paths]' >> $HGRCPATH
  $ echo "somewhere = `pwd`/url from paths" >> $HGRCPATH
  $ echo "elsewhere = `pwd`/another paths url" >> $HGRCPATH

init should (for consistency with clone) expand the url

  $ hg init somewhere
  $ checknewrepo "url from paths"
  store created
  00changelog.i created
  revlogv1
- fncache
- store
+ store
+ fncache
  dotencode

verify that clone also expand urls

  $ hg clone somewhere elsewhere
  updating to branch default
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ checknewrepo "another paths url"
  store created
  00changelog.i created
  revlogv1
- fncache
- store
+ store
+ fncache
  dotencode

clone bookmarks

  $ hg -R local bookmark test
  $ hg -R local bookmarks
   * test                      0:08b9e9f63b32
  $ hg clone -e "python $TESTDIR/dummyssh" local ssh://user@dummy/remote-bookmarks
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  $ hg -R remote-bookmarks bookmarks
     test                      0:08b9e9f63b32
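The -fncache/+store swaps in the expected output above are the visible effect of this changeset: .hg/requires is written in the requirements set's iteration order, and on the CPython builds these tests run under, 'store' happens to come out ahead of 'fncache'. A hedged illustration (set iteration order is an implementation detail, not a contract):

    # Requirements gathered while creating a repo; with a set, the write
    # order follows hash-table iteration, not the order of the add() calls.
    requirements = set()
    for req in ('revlogv1', 'store', 'fncache', 'dotencode'):
        requirements.add(req)

    # Emulate writing .hg/requires; the exact line order depends on the
    # Python implementation, which is why the test expectations changed.
    print('\n'.join(requirements))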