changegroupsubset: simplify parents pruning
Benoit Boissinot
r10010:2fce9691 default
@@ -1,2175 +1,2165 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
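        # scan each head list from last to first and stop at the first
        # open head; if every head is closed, fall back to the last one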
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


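    # the branch head cache ('branchheads.cache') stores one "tipnode tiprev"
    # header line followed by one "node branchlabel" line per branch head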
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
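            # prune heads that turn out to be reachable from a newer
            # candidate; minbhrev bounds how far back reachable() walks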
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
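        # fall through: integer/hex/node match first, then tag names,
        # then branch names, finally unambiguous node-hex prefixes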
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

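        # apply the first configured filter whose pattern matches the file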
        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

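        # once the transaction closes, aftertrans renames the journal
        # files to the undo files that rollback() replays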
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
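        # reuse a lock that is still held elsewhere; calling l.lock()
        # only bumps its nesting depth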
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            try:
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

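            # the changelog write is delayed so the pretxncommit hook can
            # see the pending changeset through HG_PENDING (cf. the
            # changelog propertycache above) before it is finalized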
916 # update changelog
916 # update changelog
917 self.changelog.delayupdate()
917 self.changelog.delayupdate()
918 n = self.changelog.add(mn, changed + removed, ctx.description(),
918 n = self.changelog.add(mn, changed + removed, ctx.description(),
919 trp, p1.node(), p2.node(),
919 trp, p1.node(), p2.node(),
920 user, ctx.date(), ctx.extra().copy())
920 user, ctx.date(), ctx.extra().copy())
921 p = lambda: self.changelog.writepending() and self.root or ""
921 p = lambda: self.changelog.writepending() and self.root or ""
922 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
922 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
923 parent2=xp2, pending=p)
923 parent2=xp2, pending=p)
924 self.changelog.finalize(trp)
924 self.changelog.finalize(trp)
925 tr.close()
925 tr.close()
926
926
927 if self._branchcache:
927 if self._branchcache:
928 self.branchtags()
928 self.branchtags()
929
929
930 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
930 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
931 return n
931 return n
932 finally:
932 finally:
933 del tr
933 del tr
934 lock.release()
934 lock.release()
935
935
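    # A sketch of the hook sequence driven above (the hgrc snippet is
    # illustrative, not part of this file): 'precommit' fires before the
    # lock is taken and may veto the commit, 'pretxncommit' fires once the
    # new node is known but while the transaction can still be rolled
    # back, and 'commit' fires after tr.close().
    #
    #   [hooks]
    #   precommit.check = /path/to/check-script
    #   pretxncommit.audit = /path/to/audit-script   # sees $HG_NODE
    #   commit.notify = /path/to/notify-script
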
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes, or between a node and
        the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

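    # Usage sketch (hypothetical caller): the 7-tuple above unpacks in
    # this order, each element a sorted list of filenames.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
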
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

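    # Usage sketch (hypothetical file names): add() returns the files it
    # could not schedule for tracking.
    #
    #   rejected = repo.add(['README', 'no-such-file'])
    #   # 'no-such-file' is warned about and returned in rejected;
    #   # README is now marked 'a' in the dirstate.
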
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

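    # Illustrative example of the sort trick above: negating the revs
    # turns an ascending sort into newest-first order. For heads with
    # revs {a: 5, b: 9, c: 2}, sorted([(-5, a), (-9, b), (-2, c)]) gives
    # [(-9, b), (-5, a), (-2, c)], so heads() returns [b, a, c].
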
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

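    # Illustrative trace (hypothetical linear history): a node is
    # reported whenever the step counter i catches up with f, and f
    # doubles each time, so the reported nodes lie 1, 2, 4, 8, ...
    # first-parent steps below top. For a chain n10 -> n9 -> ... -> n0:
    #
    #   repo.between([(n10, n0)])  =>  [[n9, n8, n6, n2]]
    #
    # This exponential spacing is what lets findcommonincoming
    # binary-search long stretches of history in few round trips.
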
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

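    # Usage sketch, mirroring how pull() below drives this: common holds
    # the shared heads, fetch the roots of what we lack, and rheads the
    # remote heads.
    #
    #   common, fetch, rheads = repo.findcommonincoming(remote)
    #   if fetch:
    #       cg = remote.changegroupsubset(fetch, rheads, 'pull')
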
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

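    # Shape sketch (mirroring how prepush below calls this): without a
    # heads list the result is just the missing roots; with one, it is a
    # pair of (roots to push, remote heads that will grow new children).
    #
    #   update = repo.findoutgoing(remote, common)
    #   update, updated_heads = repo.findoutgoing(remote, common,
    #                                             remote_heads)
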
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[lheads[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        if not checkbranch(lheads, rheads, update):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

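        # Minimal sketch of the pruning above with a toy history a <- b <- c
        # (names hypothetical): given hasset = set([c]), ancestors() walks
        # back through c's parents, so the entries for b and a are popped
        # and only nodes the recipient may genuinely lack remain.
        #
        #   msngset = {a: cl_a, b: cl_b, d: cl_d}
        #   prune_parents(filerevlog, set([c]), msngset)
        #   # msngset == {d: cl_d}
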
1723 # This is a function generating function used to set up an environment
1713 # This is a function generating function used to set up an environment
1724 # for the inner function to execute in.
1714 # for the inner function to execute in.
1725 def manifest_and_file_collector(changedfileset):
1715 def manifest_and_file_collector(changedfileset):
1726 # This is an information gathering function that gathers
1716 # This is an information gathering function that gathers
1727 # information from each changeset node that goes out as part of
1717 # information from each changeset node that goes out as part of
1728 # the changegroup. The information gathered is a list of which
1718 # the changegroup. The information gathered is a list of which
1729 # manifest nodes are potentially required (the recipient may
1719 # manifest nodes are potentially required (the recipient may
1730 # already have them) and total list of all files which were
1720 # already have them) and total list of all files which were
1731 # changed in any changeset in the changegroup.
1721 # changed in any changeset in the changegroup.
1732 #
1722 #
1733 # We also remember the first changenode we saw any manifest
1723 # We also remember the first changenode we saw any manifest
1734 # referenced by so we can later determine which changenode 'owns'
1724 # referenced by so we can later determine which changenode 'owns'
1735 # the manifest.
1725 # the manifest.
1736 def collect_manifests_and_files(clnode):
1726 def collect_manifests_and_files(clnode):
1737 c = cl.read(clnode)
1727 c = cl.read(clnode)
1738 for f in c[3]:
1728 for f in c[3]:
1739 # This is to make sure we only have one instance of each
1729 # This is to make sure we only have one instance of each
1740 # filename string for each filename.
1730 # filename string for each filename.
1741 changedfileset.setdefault(f, f)
1731 changedfileset.setdefault(f, f)
1742 msng_mnfst_set.setdefault(c[0], clnode)
1732 msng_mnfst_set.setdefault(c[0], clnode)
1743 return collect_manifests_and_files
1733 return collect_manifests_and_files
1744
1734
1745 # Figure out which manifest nodes (of the ones we think might be part
1735 # Figure out which manifest nodes (of the ones we think might be part
1746 # of the changegroup) the recipient must know about and remove them
1736 # of the changegroup) the recipient must know about and remove them
1747 # from the changegroup.
1737 # from the changegroup.
1748 def prune_manifests():
1738 def prune_manifests():
1749 has_mnfst_set = set()
1739 has_mnfst_set = set()
1750 for n in msng_mnfst_set:
1740 for n in msng_mnfst_set:
1751 # If a 'missing' manifest thinks it belongs to a changenode
1741 # If a 'missing' manifest thinks it belongs to a changenode
1752 # the recipient is assumed to have, obviously the recipient
1742 # the recipient is assumed to have, obviously the recipient
1753 # must have that manifest.
1743 # must have that manifest.
1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1744 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1755 if linknode in has_cl_set:
1745 if linknode in has_cl_set:
1756 has_mnfst_set.add(n)
1746 has_mnfst_set.add(n)
1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1747 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1758
1748
1759 # Use the information collected in collect_manifests_and_files to say
1749 # Use the information collected in collect_manifests_and_files to say
1760 # which changenode any manifestnode belongs to.
1750 # which changenode any manifestnode belongs to.
1761 def lookup_manifest_link(mnfstnode):
1751 def lookup_manifest_link(mnfstnode):
1762 return msng_mnfst_set[mnfstnode]
1752 return msng_mnfst_set[mnfstnode]
1763
1753
1764 # A function generating function that sets up the initial environment
1754 # A function generating function that sets up the initial environment
1765 # the inner function.
1755 # the inner function.
1766 def filenode_collector(changedfiles):
1756 def filenode_collector(changedfiles):
1767 next_rev = [0]
1757 next_rev = [0]
1768 # This gathers information from each manifestnode included in the
1758 # This gathers information from each manifestnode included in the
1769 # changegroup about which filenodes the manifest node references
1759 # changegroup about which filenodes the manifest node references
1770 # so we can include those in the changegroup too.
1760 # so we can include those in the changegroup too.
1771 #
1761 #
1772 # It also remembers which changenode each filenode belongs to. It
1762 # It also remembers which changenode each filenode belongs to. It
1773 # does this by assuming the a filenode belongs to the changenode
1763 # does this by assuming the a filenode belongs to the changenode
1774 # the first manifest that references it belongs to.
1764 # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
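                # map filenode -> filelog revision -> changelog revision ->
                # the changenode that introduced this version of the file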
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
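            # changedfiles is filled in by manifest_and_file_collector() with
            # one entry per changed filename; the filenode collectors use
            # .get() on it both as a membership test and to share a single
            # string object per filename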
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
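            # extranodes maps file names to extra (node, linknode) pairs; the
            # manifest's entry is keyed by the integer 1, which cannot clash
            # with a filename (hence the isinstance(fname, int) check below)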
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
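                    # each file group on the wire starts with a chunk that
                    # carries just the file name, then the filenode deltas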
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
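
    # A minimal usage sketch (illustrative, not part of the original module;
    # 'ui', the repository path and 'basenode' are assumptions):
    #
    #   repo = localrepository(ui, '/path/to/repo')
    #   cg = repo.changegroup([basenode], 'push')  # basenode: a node the
    #                                              # recipient already has
    #   data = cg.read(4096)  # read raw changegroup bytes from the buffer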

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
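            # yield the nodes of every revision in this revlog that was
            # introduced by one of the changesets we are sending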
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
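                # c[3] is the list of files modified by this changeset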
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
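            # map a manifest or file node back to the node of the changeset
            # that introduced it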
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
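            # len(cl) is the revision number that the changeset currently
            # being added will be assigned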
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
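            # each file group is preceded by a chunk holding the file name;
            # an empty chunk terminates the sequence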
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
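                # the 'pending' callback gives pretxnchangegroup hooks a
                # chance to see the incoming changesets: it returns the repo
                # root when writepending() has written pending changelog
                # data, and an empty string otherwise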
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
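        # the first line of the stream is a status code: 0 means OK,
        # 1 means the operation is forbidden, 2 means remote locking failed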
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
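        # the next line announces "<number of files> <total bytes>"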
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
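            # each entry is announced as "<name>\0<size>", followed by
            # exactly <size> bytes of file data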
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
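        # like addchangegroup(), signal success with a strictly positive
        # return value (a fresh streaming clone can only add heads)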
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
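# 'files' is a list of (src, dest) pairs; the returned callback renames each
# src to dest (in practice, the transaction journal to the undo files)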
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True