localrepo: add a quick docstring for localrepo.branchmap()
Dirkjan Ochtman, r10327:32197f7e (default branch)
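For context, branchmap() returns a dictionary mapping each branch name to the
list of that branch's head nodes (all heads, not only the tipmost one), while
branchtags() reduces that to a single head per branch. A minimal usage sketch,
not part of this commit (the repository path is hypothetical; the API is as of
this revision):

    from mercurial import ui as uimod, hg
    from mercurial.node import short

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical path
    # branchmap(): branch name (stored in UTF-8) -> list of binary head nodeids
    for branch, heads in repo.branchmap().iteritems():
        print branch, [short(n) for n in heads]
    # branchtags(): branch name -> single tipmost head, preferring open heads
    print repo.branchtags()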
@@ -1,2170 +1,2171 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
325
326 return partial
326 return partial
327
327
328 def branchmap(self):
328 def branchmap(self):
329 '''returns a dictionary {branch: [branchheads]}'''
329 tip = self.changelog.tip()
330 tip = self.changelog.tip()
330 if self._branchcache is not None and self._branchcachetip == tip:
331 if self._branchcache is not None and self._branchcachetip == tip:
331 return self._branchcache
332 return self._branchcache
332
333
333 oldtip = self._branchcachetip
334 oldtip = self._branchcachetip
334 self._branchcachetip = tip
335 self._branchcachetip = tip
335 if oldtip is None or oldtip not in self.changelog.nodemap:
336 if oldtip is None or oldtip not in self.changelog.nodemap:
336 partial, last, lrev = self._readbranchcache()
337 partial, last, lrev = self._readbranchcache()
337 else:
338 else:
338 lrev = self.changelog.rev(oldtip)
339 lrev = self.changelog.rev(oldtip)
339 partial = self._branchcache
340 partial = self._branchcache
340
341
341 self._branchtags(partial, lrev)
342 self._branchtags(partial, lrev)
342 # this private cache holds all heads (not just tips)
343 # this private cache holds all heads (not just tips)
343 self._branchcache = partial
344 self._branchcache = partial
344
345
345 return self._branchcache
346 return self._branchcache
346
347
347 def branchtags(self):
348 def branchtags(self):
348 '''return a dict where branch names map to the tipmost head of
349 '''return a dict where branch names map to the tipmost head of
349 the branch, open heads come before closed'''
350 the branch, open heads come before closed'''
350 bt = {}
351 bt = {}
351 for bn, heads in self.branchmap().iteritems():
352 for bn, heads in self.branchmap().iteritems():
352 head = None
353 head = None
353 for i in range(len(heads)-1, -1, -1):
354 for i in range(len(heads)-1, -1, -1):
354 h = heads[i]
355 h = heads[i]
355 if 'close' not in self.changelog.read(h)[5]:
356 if 'close' not in self.changelog.read(h)[5]:
356 head = h
357 head = h
357 break
358 break
358 # no open heads were found
359 # no open heads were found
359 if head is None:
360 if head is None:
360 head = heads[-1]
361 head = heads[-1]
361 bt[bn] = head
362 bt[bn] = head
362 return bt
363 return bt
363
364
364
365
365 def _readbranchcache(self):
366 def _readbranchcache(self):
366 partial = {}
367 partial = {}
367 try:
368 try:
368 f = self.opener("branchheads.cache")
369 f = self.opener("branchheads.cache")
369 lines = f.read().split('\n')
370 lines = f.read().split('\n')
370 f.close()
371 f.close()
371 except (IOError, OSError):
372 except (IOError, OSError):
372 return {}, nullid, nullrev
373 return {}, nullid, nullrev
373
374
374 try:
375 try:
375 last, lrev = lines.pop(0).split(" ", 1)
376 last, lrev = lines.pop(0).split(" ", 1)
376 last, lrev = bin(last), int(lrev)
377 last, lrev = bin(last), int(lrev)
377 if lrev >= len(self) or self[lrev].node() != last:
378 if lrev >= len(self) or self[lrev].node() != last:
378 # invalidate the cache
379 # invalidate the cache
379 raise ValueError('invalidating branch cache (tip differs)')
380 raise ValueError('invalidating branch cache (tip differs)')
380 for l in lines:
381 for l in lines:
381 if not l:
382 if not l:
382 continue
383 continue
383 node, label = l.split(" ", 1)
384 node, label = l.split(" ", 1)
384 partial.setdefault(label.strip(), []).append(bin(node))
385 partial.setdefault(label.strip(), []).append(bin(node))
385 except KeyboardInterrupt:
386 except KeyboardInterrupt:
386 raise
387 raise
387 except Exception, inst:
388 except Exception, inst:
388 if self.ui.debugflag:
389 if self.ui.debugflag:
389 self.ui.warn(str(inst), '\n')
390 self.ui.warn(str(inst), '\n')
390 partial, last, lrev = {}, nullid, nullrev
391 partial, last, lrev = {}, nullid, nullrev
391 return partial, last, lrev
392 return partial, last, lrev
392
393
393 def _writebranchcache(self, branches, tip, tiprev):
394 def _writebranchcache(self, branches, tip, tiprev):
394 try:
395 try:
395 f = self.opener("branchheads.cache", "w", atomictemp=True)
396 f = self.opener("branchheads.cache", "w", atomictemp=True)
396 f.write("%s %s\n" % (hex(tip), tiprev))
397 f.write("%s %s\n" % (hex(tip), tiprev))
397 for label, nodes in branches.iteritems():
398 for label, nodes in branches.iteritems():
398 for node in nodes:
399 for node in nodes:
399 f.write("%s %s\n" % (hex(node), label))
400 f.write("%s %s\n" % (hex(node), label))
400 f.rename()
401 f.rename()
401 except (IOError, OSError):
402 except (IOError, OSError):
402 pass
403 pass
403
404
404 def _updatebranchcache(self, partial, start, end):
405 def _updatebranchcache(self, partial, start, end):
405 # collect new branch entries
406 # collect new branch entries
406 newbranches = {}
407 newbranches = {}
407 for r in xrange(start, end):
408 for r in xrange(start, end):
408 c = self[r]
409 c = self[r]
409 newbranches.setdefault(c.branch(), []).append(c.node())
410 newbranches.setdefault(c.branch(), []).append(c.node())
410 # if older branchheads are reachable from new ones, they aren't
411 # if older branchheads are reachable from new ones, they aren't
411 # really branchheads. Note checking parents is insufficient:
412 # really branchheads. Note checking parents is insufficient:
412 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
413 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
413 for branch, newnodes in newbranches.iteritems():
414 for branch, newnodes in newbranches.iteritems():
414 bheads = partial.setdefault(branch, [])
415 bheads = partial.setdefault(branch, [])
415 bheads.extend(newnodes)
416 bheads.extend(newnodes)
416 if len(bheads) < 2:
417 if len(bheads) < 2:
417 continue
418 continue
418 newbheads = []
419 newbheads = []
419 # starting from tip means fewer passes over reachable
420 # starting from tip means fewer passes over reachable
420 while newnodes:
421 while newnodes:
421 latest = newnodes.pop()
422 latest = newnodes.pop()
422 if latest not in bheads:
423 if latest not in bheads:
423 continue
424 continue
424 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
425 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
425 reachable = self.changelog.reachable(latest, minbhrev)
426 reachable = self.changelog.reachable(latest, minbhrev)
426 bheads = [b for b in bheads if b not in reachable]
427 bheads = [b for b in bheads if b not in reachable]
427 newbheads.insert(0, latest)
428 newbheads.insert(0, latest)
428 bheads.extend(newbheads)
429 bheads.extend(newbheads)
429 partial[branch] = bheads
430 partial[branch] = bheads
430
431
431 def lookup(self, key):
432 def lookup(self, key):
432 if isinstance(key, int):
433 if isinstance(key, int):
433 return self.changelog.node(key)
434 return self.changelog.node(key)
434 elif key == '.':
435 elif key == '.':
435 return self.dirstate.parents()[0]
436 return self.dirstate.parents()[0]
436 elif key == 'null':
437 elif key == 'null':
437 return nullid
438 return nullid
438 elif key == 'tip':
439 elif key == 'tip':
439 return self.changelog.tip()
440 return self.changelog.tip()
440 n = self.changelog._match(key)
441 n = self.changelog._match(key)
441 if n:
442 if n:
442 return n
443 return n
443 if key in self.tags():
444 if key in self.tags():
444 return self.tags()[key]
445 return self.tags()[key]
445 if key in self.branchtags():
446 if key in self.branchtags():
446 return self.branchtags()[key]
447 return self.branchtags()[key]
447 n = self.changelog._partialmatch(key)
448 n = self.changelog._partialmatch(key)
448 if n:
449 if n:
449 return n
450 return n
450
451
451 # can't find key, check if it might have come from damaged dirstate
452 # can't find key, check if it might have come from damaged dirstate
452 if key in self.dirstate.parents():
453 if key in self.dirstate.parents():
453 raise error.Abort(_("working directory has unknown parent '%s'!")
454 raise error.Abort(_("working directory has unknown parent '%s'!")
454 % short(key))
455 % short(key))
455 try:
456 try:
456 if len(key) == 20:
457 if len(key) == 20:
457 key = hex(key)
458 key = hex(key)
458 except:
459 except:
459 pass
460 pass
460 raise error.RepoLookupError(_("unknown revision '%s'") % key)
461 raise error.RepoLookupError(_("unknown revision '%s'") % key)
461
462
462 def local(self):
463 def local(self):
463 return True
464 return True
464
465
465 def join(self, f):
466 def join(self, f):
466 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
467
468
468 def wjoin(self, f):
469 def wjoin(self, f):
469 return os.path.join(self.root, f)
470 return os.path.join(self.root, f)
470
471
471 def rjoin(self, f):
472 def rjoin(self, f):
472 return os.path.join(self.root, util.pconvert(f))
473 return os.path.join(self.root, util.pconvert(f))
473
474
474 def file(self, f):
475 def file(self, f):
475 if f[0] == '/':
476 if f[0] == '/':
476 f = f[1:]
477 f = f[1:]
477 return filelog.filelog(self.sopener, f)
478 return filelog.filelog(self.sopener, f)
478
479
479 def changectx(self, changeid):
480 def changectx(self, changeid):
480 return self[changeid]
481 return self[changeid]
481
482
482 def parents(self, changeid=None):
483 def parents(self, changeid=None):
483 '''get list of changectxs for parents of changeid'''
484 '''get list of changectxs for parents of changeid'''
484 return self[changeid].parents()
485 return self[changeid].parents()
485
486
486 def filectx(self, path, changeid=None, fileid=None):
487 def filectx(self, path, changeid=None, fileid=None):
487 """changeid can be a changeset revision, node, or tag.
488 """changeid can be a changeset revision, node, or tag.
488 fileid can be a file revision or node."""
489 fileid can be a file revision or node."""
489 return context.filectx(self, path, changeid, fileid)
490 return context.filectx(self, path, changeid, fileid)
490
491
491 def getcwd(self):
492 def getcwd(self):
492 return self.dirstate.getcwd()
493 return self.dirstate.getcwd()
493
494
494 def pathto(self, f, cwd=None):
495 def pathto(self, f, cwd=None):
495 return self.dirstate.pathto(f, cwd)
496 return self.dirstate.pathto(f, cwd)
496
497
497 def wfile(self, f, mode='r'):
498 def wfile(self, f, mode='r'):
498 return self.wopener(f, mode)
499 return self.wopener(f, mode)
499
500
500 def _link(self, f):
501 def _link(self, f):
501 return os.path.islink(self.wjoin(f))
502 return os.path.islink(self.wjoin(f))
502
503
503 def _filter(self, filter, filename, data):
504 def _filter(self, filter, filename, data):
504 if filter not in self.filterpats:
505 if filter not in self.filterpats:
505 l = []
506 l = []
506 for pat, cmd in self.ui.configitems(filter):
507 for pat, cmd in self.ui.configitems(filter):
507 if cmd == '!':
508 if cmd == '!':
508 continue
509 continue
509 mf = match_.match(self.root, '', [pat])
510 mf = match_.match(self.root, '', [pat])
510 fn = None
511 fn = None
511 params = cmd
512 params = cmd
512 for name, filterfn in self._datafilters.iteritems():
513 for name, filterfn in self._datafilters.iteritems():
513 if cmd.startswith(name):
514 if cmd.startswith(name):
514 fn = filterfn
515 fn = filterfn
515 params = cmd[len(name):].lstrip()
516 params = cmd[len(name):].lstrip()
516 break
517 break
517 if not fn:
518 if not fn:
518 fn = lambda s, c, **kwargs: util.filter(s, c)
519 fn = lambda s, c, **kwargs: util.filter(s, c)
519 # Wrap old filters not supporting keyword arguments
520 # Wrap old filters not supporting keyword arguments
520 if not inspect.getargspec(fn)[2]:
521 if not inspect.getargspec(fn)[2]:
521 oldfn = fn
522 oldfn = fn
522 fn = lambda s, c, **kwargs: oldfn(s, c)
523 fn = lambda s, c, **kwargs: oldfn(s, c)
523 l.append((mf, fn, params))
524 l.append((mf, fn, params))
524 self.filterpats[filter] = l
525 self.filterpats[filter] = l
525
526
526 for mf, fn, cmd in self.filterpats[filter]:
527 for mf, fn, cmd in self.filterpats[filter]:
527 if mf(filename):
528 if mf(filename):
528 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
529 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
529 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
530 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
530 break
531 break
531
532
532 return data
533 return data
533
534
534 def adddatafilter(self, name, filter):
535 def adddatafilter(self, name, filter):
535 self._datafilters[name] = filter
536 self._datafilters[name] = filter
536
537
537 def wread(self, filename):
538 def wread(self, filename):
538 if self._link(filename):
539 if self._link(filename):
539 data = os.readlink(self.wjoin(filename))
540 data = os.readlink(self.wjoin(filename))
540 else:
541 else:
541 data = self.wopener(filename, 'r').read()
542 data = self.wopener(filename, 'r').read()
542 return self._filter("encode", filename, data)
543 return self._filter("encode", filename, data)
543
544
544 def wwrite(self, filename, data, flags):
545 def wwrite(self, filename, data, flags):
545 data = self._filter("decode", filename, data)
546 data = self._filter("decode", filename, data)
546 try:
547 try:
547 os.unlink(self.wjoin(filename))
548 os.unlink(self.wjoin(filename))
548 except OSError:
549 except OSError:
549 pass
550 pass
550 if 'l' in flags:
551 if 'l' in flags:
551 self.wopener.symlink(data, filename)
552 self.wopener.symlink(data, filename)
552 else:
553 else:
553 self.wopener(filename, 'w').write(data)
554 self.wopener(filename, 'w').write(data)
554 if 'x' in flags:
555 if 'x' in flags:
555 util.set_flags(self.wjoin(filename), False, True)
556 util.set_flags(self.wjoin(filename), False, True)
556
557
557 def wwritedata(self, filename, data):
558 def wwritedata(self, filename, data):
558 return self._filter("decode", filename, data)
559 return self._filter("decode", filename, data)
559
560
560 def transaction(self):
561 def transaction(self):
561 tr = self._transref and self._transref() or None
562 tr = self._transref and self._transref() or None
562 if tr and tr.running():
563 if tr and tr.running():
563 return tr.nest()
564 return tr.nest()
564
565
565 # abort here if the journal already exists
566 # abort here if the journal already exists
566 if os.path.exists(self.sjoin("journal")):
567 if os.path.exists(self.sjoin("journal")):
567 raise error.RepoError(
568 raise error.RepoError(
568 _("abandoned transaction found - run hg recover"))
569 _("abandoned transaction found - run hg recover"))
569
570
570 # save dirstate for rollback
571 # save dirstate for rollback
571 try:
572 try:
572 ds = self.opener("dirstate").read()
573 ds = self.opener("dirstate").read()
573 except IOError:
574 except IOError:
574 ds = ""
575 ds = ""
575 self.opener("journal.dirstate", "w").write(ds)
576 self.opener("journal.dirstate", "w").write(ds)
576 self.opener("journal.branch", "w").write(self.dirstate.branch())
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
577
578
578 renames = [(self.sjoin("journal"), self.sjoin("undo")),
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
579 (self.join("journal.dirstate"), self.join("undo.dirstate")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
580 (self.join("journal.branch"), self.join("undo.branch"))]
581 (self.join("journal.branch"), self.join("undo.branch"))]
581 tr = transaction.transaction(self.ui.warn, self.sopener,
582 tr = transaction.transaction(self.ui.warn, self.sopener,
582 self.sjoin("journal"),
583 self.sjoin("journal"),
583 aftertrans(renames),
584 aftertrans(renames),
584 self.store.createmode)
585 self.store.createmode)
585 self._transref = weakref.ref(tr)
586 self._transref = weakref.ref(tr)
586 return tr
587 return tr
587
588
588 def recover(self):
589 def recover(self):
589 lock = self.lock()
590 lock = self.lock()
590 try:
591 try:
591 if os.path.exists(self.sjoin("journal")):
592 if os.path.exists(self.sjoin("journal")):
592 self.ui.status(_("rolling back interrupted transaction\n"))
593 self.ui.status(_("rolling back interrupted transaction\n"))
593 transaction.rollback(self.sopener, self.sjoin("journal"),
594 transaction.rollback(self.sopener, self.sjoin("journal"),
594 self.ui.warn)
595 self.ui.warn)
595 self.invalidate()
596 self.invalidate()
596 return True
597 return True
597 else:
598 else:
598 self.ui.warn(_("no interrupted transaction available\n"))
599 self.ui.warn(_("no interrupted transaction available\n"))
599 return False
600 return False
600 finally:
601 finally:
601 lock.release()
602 lock.release()
602
603
603 def rollback(self):
604 def rollback(self):
604 wlock = lock = None
605 wlock = lock = None
605 try:
606 try:
606 wlock = self.wlock()
607 wlock = self.wlock()
607 lock = self.lock()
608 lock = self.lock()
608 if os.path.exists(self.sjoin("undo")):
609 if os.path.exists(self.sjoin("undo")):
609 self.ui.status(_("rolling back last transaction\n"))
610 self.ui.status(_("rolling back last transaction\n"))
610 transaction.rollback(self.sopener, self.sjoin("undo"),
611 transaction.rollback(self.sopener, self.sjoin("undo"),
611 self.ui.warn)
612 self.ui.warn)
612 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
613 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
613 try:
614 try:
614 branch = self.opener("undo.branch").read()
615 branch = self.opener("undo.branch").read()
615 self.dirstate.setbranch(branch)
616 self.dirstate.setbranch(branch)
616 except IOError:
617 except IOError:
617 self.ui.warn(_("Named branch could not be reset, "
618 self.ui.warn(_("Named branch could not be reset, "
618 "current branch still is: %s\n")
619 "current branch still is: %s\n")
619 % encoding.tolocal(self.dirstate.branch()))
620 % encoding.tolocal(self.dirstate.branch()))
620 self.invalidate()
621 self.invalidate()
621 self.dirstate.invalidate()
622 self.dirstate.invalidate()
622 self.destroyed()
623 self.destroyed()
623 else:
624 else:
624 self.ui.warn(_("no rollback information available\n"))
625 self.ui.warn(_("no rollback information available\n"))
625 finally:
626 finally:
626 release(lock, wlock)
627 release(lock, wlock)
627
628
628 def invalidate(self):
629 def invalidate(self):
629 for a in "changelog manifest".split():
630 for a in "changelog manifest".split():
630 if a in self.__dict__:
631 if a in self.__dict__:
631 delattr(self, a)
632 delattr(self, a)
632 self._tags = None
633 self._tags = None
633 self._tagtypes = None
634 self._tagtypes = None
634 self.nodetagscache = None
635 self.nodetagscache = None
635 self._branchcache = None # in UTF-8
636 self._branchcache = None # in UTF-8
636 self._branchcachetip = None
637 self._branchcachetip = None
637
638
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 try:
640 try:
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 except error.LockHeld, inst:
642 except error.LockHeld, inst:
642 if not wait:
643 if not wait:
643 raise
644 raise
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 (desc, inst.locker))
646 (desc, inst.locker))
646 # default to 600 seconds timeout
647 # default to 600 seconds timeout
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 releasefn, desc=desc)
649 releasefn, desc=desc)
649 if acquirefn:
650 if acquirefn:
650 acquirefn()
651 acquirefn()
651 return l
652 return l
652
653
653 def lock(self, wait=True):
654 def lock(self, wait=True):
654 '''Lock the repository store (.hg/store) and return a weak reference
655 '''Lock the repository store (.hg/store) and return a weak reference
655 to the lock. Use this before modifying the store (e.g. committing or
656 to the lock. Use this before modifying the store (e.g. committing or
656 stripping). If you are opening a transaction, get a lock as well.)'''
657 stripping). If you are opening a transaction, get a lock as well.)'''
657 l = self._lockref and self._lockref()
658 l = self._lockref and self._lockref()
658 if l is not None and l.held:
659 if l is not None and l.held:
659 l.lock()
660 l.lock()
660 return l
661 return l
661
662
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 _('repository %s') % self.origroot)
664 _('repository %s') % self.origroot)
664 self._lockref = weakref.ref(l)
665 self._lockref = weakref.ref(l)
665 return l
666 return l
666
667
667 def wlock(self, wait=True):
668 def wlock(self, wait=True):
668 '''Lock the non-store parts of the repository (everything under
669 '''Lock the non-store parts of the repository (everything under
669 .hg except .hg/store) and return a weak reference to the lock.
670 .hg except .hg/store) and return a weak reference to the lock.
670 Use this before modifying files in .hg.'''
671 Use this before modifying files in .hg.'''
671 l = self._wlockref and self._wlockref()
672 l = self._wlockref and self._wlockref()
672 if l is not None and l.held:
673 if l is not None and l.held:
673 l.lock()
674 l.lock()
674 return l
675 return l
675
676
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
678 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
679 self.origroot)
679 self._wlockref = weakref.ref(l)
680 self._wlockref = weakref.ref(l)
680 return l
681 return l
681
682
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 """
684 """
684 commit an individual file as part of a larger transaction
685 commit an individual file as part of a larger transaction
685 """
686 """
686
687
687 fname = fctx.path()
688 fname = fctx.path()
688 text = fctx.data()
689 text = fctx.data()
689 flog = self.file(fname)
690 flog = self.file(fname)
690 fparent1 = manifest1.get(fname, nullid)
691 fparent1 = manifest1.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
692 fparent2 = fparent2o = manifest2.get(fname, nullid)
692
693
693 meta = {}
694 meta = {}
694 copy = fctx.renamed()
695 copy = fctx.renamed()
695 if copy and copy[0] != fname:
696 if copy and copy[0] != fname:
696 # Mark the new revision of this file as a copy of another
697 # Mark the new revision of this file as a copy of another
697 # file. This copy data will effectively act as a parent
698 # file. This copy data will effectively act as a parent
698 # of this new revision. If this is a merge, the first
699 # of this new revision. If this is a merge, the first
699 # parent will be the nullid (meaning "look up the copy data")
700 # parent will be the nullid (meaning "look up the copy data")
700 # and the second one will be the other parent. For example:
701 # and the second one will be the other parent. For example:
701 #
702 #
702 # 0 --- 1 --- 3 rev1 changes file foo
703 # 0 --- 1 --- 3 rev1 changes file foo
703 # \ / rev2 renames foo to bar and changes it
704 # \ / rev2 renames foo to bar and changes it
704 # \- 2 -/ rev3 should have bar with all changes and
705 # \- 2 -/ rev3 should have bar with all changes and
705 # should record that bar descends from
706 # should record that bar descends from
706 # bar in rev2 and foo in rev1
707 # bar in rev2 and foo in rev1
707 #
708 #
708 # this allows this merge to succeed:
709 # this allows this merge to succeed:
709 #
710 #
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
712 # \- 2 --- 4 as the merge base
713 # \- 2 --- 4 as the merge base
713 #
714 #
714
715
715 cfname = copy[0]
716 cfname = copy[0]
716 crev = manifest1.get(cfname)
717 crev = manifest1.get(cfname)
717 newfparent = fparent2
718 newfparent = fparent2
718
719
719 if manifest2: # branch merge
720 if manifest2: # branch merge
720 if fparent2 == nullid or crev is None: # copied on remote side
721 if fparent2 == nullid or crev is None: # copied on remote side
721 if cfname in manifest2:
722 if cfname in manifest2:
722 crev = manifest2[cfname]
723 crev = manifest2[cfname]
723 newfparent = fparent1
724 newfparent = fparent1
724
725
725 # find source in nearest ancestor if we've lost track
726 # find source in nearest ancestor if we've lost track
726 if not crev:
727 if not crev:
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 (fname, cfname))
729 (fname, cfname))
729 for ancestor in self['.'].ancestors():
730 for ancestor in self['.'].ancestors():
730 if cfname in ancestor:
731 if cfname in ancestor:
731 crev = ancestor[cfname].filenode()
732 crev = ancestor[cfname].filenode()
732 break
733 break
733
734
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 meta["copy"] = cfname
736 meta["copy"] = cfname
736 meta["copyrev"] = hex(crev)
737 meta["copyrev"] = hex(crev)
737 fparent1, fparent2 = nullid, newfparent
738 fparent1, fparent2 = nullid, newfparent
738 elif fparent2 != nullid:
739 elif fparent2 != nullid:
739 # is one parent an ancestor of the other?
740 # is one parent an ancestor of the other?
740 fparentancestor = flog.ancestor(fparent1, fparent2)
741 fparentancestor = flog.ancestor(fparent1, fparent2)
741 if fparentancestor == fparent1:
742 if fparentancestor == fparent1:
742 fparent1, fparent2 = fparent2, nullid
743 fparent1, fparent2 = fparent2, nullid
743 elif fparentancestor == fparent2:
744 elif fparentancestor == fparent2:
744 fparent2 = nullid
745 fparent2 = nullid
745
746
746 # is the file changed?
747 # is the file changed?
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 changelist.append(fname)
749 changelist.append(fname)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750
751
751 # are just the flags changed during merge?
752 # are just the flags changed during merge?
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 changelist.append(fname)
754 changelist.append(fname)
754
755
755 return fparent1
756 return fparent1
756
757
757 def commit(self, text="", user=None, date=None, match=None, force=False,
758 def commit(self, text="", user=None, date=None, match=None, force=False,
758 editor=False, extra={}):
759 editor=False, extra={}):
759 """Add a new revision to current repository.
760 """Add a new revision to current repository.
760
761
761 Revision information is gathered from the working directory,
762 Revision information is gathered from the working directory,
762 match can be used to filter the committed files. If editor is
763 match can be used to filter the committed files. If editor is
763 supplied, it is called to get a commit message.
764 supplied, it is called to get a commit message.
764 """
765 """
765
766
766 def fail(f, msg):
767 def fail(f, msg):
767 raise util.Abort('%s: %s' % (f, msg))
768 raise util.Abort('%s: %s' % (f, msg))
768
769
769 if not match:
770 if not match:
770 match = match_.always(self.root, '')
771 match = match_.always(self.root, '')
771
772
772 if not force:
773 if not force:
773 vdirs = []
774 vdirs = []
774 match.dir = vdirs.append
775 match.dir = vdirs.append
775 match.bad = fail
776 match.bad = fail
776
777
777 wlock = self.wlock()
778 wlock = self.wlock()
778 try:
779 try:
779 p1, p2 = self.dirstate.parents()
780 p1, p2 = self.dirstate.parents()
780 wctx = self[None]
781 wctx = self[None]
781
782
782 if (not force and p2 != nullid and match and
783 if (not force and p2 != nullid and match and
783 (match.files() or match.anypats())):
784 (match.files() or match.anypats())):
784 raise util.Abort(_('cannot partially commit a merge '
785 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
786 '(do not specify files or patterns)'))
786
787
787 changes = self.status(match=match, clean=force)
788 changes = self.status(match=match, clean=force)
788 if force:
789 if force:
789 changes[0].extend(changes[6]) # mq may commit unchanged files
790 changes[0].extend(changes[6]) # mq may commit unchanged files
790
791
791 # check subrepos
792 # check subrepos
792 subs = []
793 subs = []
793 for s in wctx.substate:
794 for s in wctx.substate:
794 if match(s) and wctx.sub(s).dirty():
795 if match(s) and wctx.sub(s).dirty():
795 subs.append(s)
796 subs.append(s)
796 if subs and '.hgsubstate' not in changes[0]:
797 if subs and '.hgsubstate' not in changes[0]:
797 changes[0].insert(0, '.hgsubstate')
798 changes[0].insert(0, '.hgsubstate')
798
799
799 # make sure all explicit patterns are matched
800 # make sure all explicit patterns are matched
800 if not force and match.files():
801 if not force and match.files():
801 matched = set(changes[0] + changes[1] + changes[2])
802 matched = set(changes[0] + changes[1] + changes[2])
802
803
803 for f in match.files():
804 for f in match.files():
804 if f == '.' or f in matched or f in wctx.substate:
805 if f == '.' or f in matched or f in wctx.substate:
805 continue
806 continue
806 if f in changes[3]: # missing
807 if f in changes[3]: # missing
807 fail(f, _('file not found!'))
808 fail(f, _('file not found!'))
808 if f in vdirs: # visited directory
809 if f in vdirs: # visited directory
809 d = f + '/'
810 d = f + '/'
810 for mf in matched:
811 for mf in matched:
811 if mf.startswith(d):
812 if mf.startswith(d):
812 break
813 break
813 else:
814 else:
814 fail(f, _("no match under directory!"))
815 fail(f, _("no match under directory!"))
815 elif f not in self.dirstate:
816 elif f not in self.dirstate:
816 fail(f, _("file not tracked!"))
817 fail(f, _("file not tracked!"))
817
818
818 if (not force and not extra.get("close") and p2 == nullid
819 if (not force and not extra.get("close") and p2 == nullid
819 and not (changes[0] or changes[1] or changes[2])
820 and not (changes[0] or changes[1] or changes[2])
820 and self[None].branch() == self['.'].branch()):
821 and self[None].branch() == self['.'].branch()):
821 return None
822 return None
822
823
823 ms = merge_.mergestate(self)
824 ms = merge_.mergestate(self)
824 for f in changes[0]:
825 for f in changes[0]:
825 if f in ms and ms[f] == 'u':
826 if f in ms and ms[f] == 'u':
826 raise util.Abort(_("unresolved merge conflicts "
827 raise util.Abort(_("unresolved merge conflicts "
827 "(see hg resolve)"))
828 "(see hg resolve)"))
828
829
829 cctx = context.workingctx(self, (p1, p2), text, user, date,
830 cctx = context.workingctx(self, (p1, p2), text, user, date,
830 extra, changes)
831 extra, changes)
831 if editor:
832 if editor:
832 cctx._text = editor(self, cctx, subs)
833 cctx._text = editor(self, cctx, subs)
833 edited = (text != cctx._text)
834 edited = (text != cctx._text)
834
835
835 # commit subs
836 # commit subs
836 if subs:
837 if subs:
837 state = wctx.substate.copy()
838 state = wctx.substate.copy()
838 for s in subs:
839 for s in subs:
839 self.ui.status(_('committing subrepository %s\n') % s)
840 self.ui.status(_('committing subrepository %s\n') % s)
840 sr = wctx.sub(s).commit(cctx._text, user, date)
841 sr = wctx.sub(s).commit(cctx._text, user, date)
841 state[s] = (state[s][0], sr)
842 state[s] = (state[s][0], sr)
842 subrepo.writestate(self, state)
843 subrepo.writestate(self, state)
843
844
844 # Save commit message in case this transaction gets rolled back
845 # Save commit message in case this transaction gets rolled back
845 # (e.g. by a pretxncommit hook). Leave the content alone on
846 # (e.g. by a pretxncommit hook). Leave the content alone on
846 # the assumption that the user will use the same editor again.
847 # the assumption that the user will use the same editor again.
847 msgfile = self.opener('last-message.txt', 'wb')
848 msgfile = self.opener('last-message.txt', 'wb')
848 msgfile.write(cctx._text)
849 msgfile.write(cctx._text)
849 msgfile.close()
850 msgfile.close()
850
851
851 try:
852 try:
852 ret = self.commitctx(cctx, True)
853 ret = self.commitctx(cctx, True)
853 except:
854 except:
854 if edited:
855 if edited:
855 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
856 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
856 self.ui.write(
857 self.ui.write(
857 _('note: commit message saved in %s\n') % msgfn)
858 _('note: commit message saved in %s\n') % msgfn)
858 raise
859 raise
859
860
860 # update dirstate and mergestate
861 # update dirstate and mergestate
861 for f in changes[0] + changes[1]:
862 for f in changes[0] + changes[1]:
862 self.dirstate.normal(f)
863 self.dirstate.normal(f)
863 for f in changes[2]:
864 for f in changes[2]:
864 self.dirstate.forget(f)
865 self.dirstate.forget(f)
865 self.dirstate.setparents(ret)
866 self.dirstate.setparents(ret)
866 ms.reset()
867 ms.reset()
867
868
868 return ret
869 return ret
869
870
870 finally:
871 finally:
871 wlock.release()
872 wlock.release()
872
873
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

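    # Illustrative sketch (not part of the original source): commitctx() is
    # normally driven through a context object such as context.memctx; the
    # memctx/memfilectx signatures below are assumptions based on the
    # contemporary context module. A synthetic one-file commit could look
    # roughly like:
    #
    #   from mercurial import context
    #
    #   def filectxfn(repo, mctx, path):
    #       # called back once for every path listed in files
    #       return context.memfilectx(path, 'new contents\n', False, False,
    #                                 None)
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'commit message',
    #                         ['a.txt'], filectxfn, user='someone@example.com')
    #   new_node = repo.commitctx(mctx)
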
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

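    # Illustrative sketch (not part of the original source): walking all
    # files of a revision with an always-matching matcher, built the same
    # way status() builds its default matcher below:
    #
    #   m = match_.always(repo.root, repo.getcwd())
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write(f + '\n')
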
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

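    # Illustrative sketch (not part of the original source): the seven lists
    # come back in the order assembled above, so callers typically unpack
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(ignored=True, clean=True,
    #                                           unknown=True)
    #
    # where the last three lists stay empty unless explicitly requested.
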
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

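    # Illustrative sketch (not part of the original source): listing the open
    # heads of the branch the working directory is on, newest first:
    #
    #   for h in repo.branchheads():
    #       repo.ui.write('%s\n' % short(h))
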
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

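    # Illustrative note (not part of the original source): for each
    # (top, bottom) pair the returned list samples the first-parent chain of
    # top at exponentially growing distances 1, 2, 4, 8, ... below top,
    # stopping at bottom; e.g. samples = repo.between([(tip, root)])[0].
    # findcommonincoming() feeds such samples, obtained via remote.between(),
    # to its binary search over incomplete remote branch segments.
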
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

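    # Illustrative note (not part of the original source): the returned tuple
    # unpacks as
    #
    #   common, fetch, rheads = repo.findcommonincoming(other)
    #
    # where common holds nodes known to both repositories, fetch holds the
    # roots of the changeset subsets missing locally, and rheads holds the
    # remote heads still unknown locally.
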
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

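    # Illustrative sketch (not part of the original source): pulling between
    # two local repositories, assuming hg.repository() to open the source:
    #
    #   from mercurial import ui as ui_, hg
    #   other = hg.repository(ui_.ui(), '/path/to/src')
    #   repo.pull(other)                                  # pull everything
    #   repo.pull(other, heads=[other.lookup('stable')])  # pull up to a head
    #
    # (the second form assumes the source has a 'stable' branch or tag)
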
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[lheads[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        if not checkbranch(lheads, rheads, update):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex, as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
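        # Illustrative example (not part of the original source) of the
        # extranodes shape described in the docstring above:
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],        # manifest entries
        #       'foo.txt': [(filenode, linknode)],    # per-file entries
        #   }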
1650
1651
1651 # Set up some initial variables
1652 # Set up some initial variables
1652 # Make it easy to refer to self.changelog
1653 # Make it easy to refer to self.changelog
1653 cl = self.changelog
1654 cl = self.changelog
1654 # msng is short for missing - compute the list of changesets in this
1655 # msng is short for missing - compute the list of changesets in this
1655 # changegroup.
1656 # changegroup.
1656 if not bases:
1657 if not bases:
1657 bases = [nullid]
1658 bases = [nullid]
1658 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1659 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1659
1660
1660 if extranodes is None:
1661 if extranodes is None:
1661 # can we go through the fast path ?
1662 # can we go through the fast path ?
1662 heads.sort()
1663 heads.sort()
1663 allheads = self.heads()
1664 allheads = self.heads()
1664 allheads.sort()
1665 allheads.sort()
1665 if heads == allheads:
1666 if heads == allheads:
1666 return self._changegroup(msng_cl_lst, source)
1667 return self._changegroup(msng_cl_lst, source)
1667
1668
1668 # slow path
1669 # slow path
1669 self.hook('preoutgoing', throw=True, source=source)
1670 self.hook('preoutgoing', throw=True, source=source)
1670
1671
1671 self.changegroupinfo(msng_cl_lst, source)
1672 self.changegroupinfo(msng_cl_lst, source)
1672 # Some bases may turn out to be superfluous, and some heads may be
1673 # Some bases may turn out to be superfluous, and some heads may be
1673 # too. nodesbetween will return the minimal set of bases and heads
1674 # too. nodesbetween will return the minimal set of bases and heads
1674 # necessary to re-create the changegroup.
1675 # necessary to re-create the changegroup.
1675
1676
1676 # Known heads are the list of heads that it is assumed the recipient
1677 # Known heads are the list of heads that it is assumed the recipient
1677 # of this changegroup will know about.
1678 # of this changegroup will know about.
1678 knownheads = set()
1679 knownheads = set()
1679 # We assume that all parents of bases are known heads.
1680 # We assume that all parents of bases are known heads.
1680 for n in bases:
1681 for n in bases:
1681 knownheads.update(cl.parents(n))
1682 knownheads.update(cl.parents(n))
1682 knownheads.discard(nullid)
1683 knownheads.discard(nullid)
1683 knownheads = list(knownheads)
1684 knownheads = list(knownheads)
1684 if knownheads:
1685 if knownheads:
1685 # Now that we know what heads are known, we can compute which
1686 # Now that we know what heads are known, we can compute which
1686 # changesets are known. The recipient must know about all
1687 # changesets are known. The recipient must know about all
1687 # changesets required to reach the known heads from the null
1688 # changesets required to reach the known heads from the null
1688 # changeset.
1689 # changeset.
1689 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1690 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1690 junk = None
1691 junk = None
1691 # Transform the list into a set.
1692 # Transform the list into a set.
1692 has_cl_set = set(has_cl_set)
1693 has_cl_set = set(has_cl_set)
1693 else:
1694 else:
1694 # If there were no known heads, the recipient cannot be assumed to
1695 # If there were no known heads, the recipient cannot be assumed to
1695 # know about any changesets.
1696 # know about any changesets.
1696 has_cl_set = set()
1697 has_cl_set = set()
1697
1698
1698 # Make it easy to refer to self.manifest
1699 # Make it easy to refer to self.manifest
1699 mnfst = self.manifest
1700 mnfst = self.manifest
1700 # We don't know which manifests are missing yet
1701 # We don't know which manifests are missing yet
1701 msng_mnfst_set = {}
1702 msng_mnfst_set = {}
1702 # Nor do we know which filenodes are missing.
1703 # Nor do we know which filenodes are missing.
1703 msng_filenode_set = {}
1704 msng_filenode_set = {}
1704
1705
1705 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1706 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1706 junk = None
1707 junk = None
1707
1708
1708 # A changeset always belongs to itself, so the changenode lookup
1709 # A changeset always belongs to itself, so the changenode lookup
1709 # function for a changenode is identity.
1710 # function for a changenode is identity.
1710 def identity(x):
1711 def identity(x):
1711 return x
1712 return x
1712
1713
1713 # If we determine that a particular file or manifest node must be a
1714 # If we determine that a particular file or manifest node must be a
1714 # node that the recipient of the changegroup will already have, we can
1715 # node that the recipient of the changegroup will already have, we can
1715 # also assume the recipient will have all the parents. This function
1716 # also assume the recipient will have all the parents. This function
1716 # prunes them from the set of missing nodes.
1717 # prunes them from the set of missing nodes.
1717 def prune_parents(revlog, hasset, msngset):
1718 def prune_parents(revlog, hasset, msngset):
1718 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1719 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1719 msngset.pop(revlog.node(r), None)
1720 msngset.pop(revlog.node(r), None)
1720
1721
1721 # This is a function generating function used to set up an environment
1722 # This is a function generating function used to set up an environment
1722 # for the inner function to execute in.
1723 # for the inner function to execute in.
1723 def manifest_and_file_collector(changedfileset):
1724 def manifest_and_file_collector(changedfileset):
1724 # This is an information gathering function that gathers
1725 # This is an information gathering function that gathers
1725 # information from each changeset node that goes out as part of
1726 # information from each changeset node that goes out as part of
1726 # the changegroup. The information gathered is a list of which
1727 # the changegroup. The information gathered is a list of which
1727 # manifest nodes are potentially required (the recipient may
1728 # manifest nodes are potentially required (the recipient may
1728 # already have them) and total list of all files which were
1729 # already have them) and total list of all files which were
1729 # changed in any changeset in the changegroup.
1730 # changed in any changeset in the changegroup.
1730 #
1731 #
1731 # We also remember the first changenode we saw any manifest
1732 # We also remember the first changenode we saw any manifest
1732 # referenced by so we can later determine which changenode 'owns'
1733 # referenced by so we can later determine which changenode 'owns'
1733 # the manifest.
1734 # the manifest.
1734 def collect_manifests_and_files(clnode):
1735 def collect_manifests_and_files(clnode):
1735 c = cl.read(clnode)
1736 c = cl.read(clnode)
1736 for f in c[3]:
1737 for f in c[3]:
1737 # This is to make sure we only have one instance of each
1738 # This is to make sure we only have one instance of each
1738 # filename string for each filename.
1739 # filename string for each filename.
1739 changedfileset.setdefault(f, f)
1740 changedfileset.setdefault(f, f)
1740 msng_mnfst_set.setdefault(c[0], clnode)
1741 msng_mnfst_set.setdefault(c[0], clnode)
1741 return collect_manifests_and_files
1742 return collect_manifests_and_files
1742
1743
1743 # Figure out which manifest nodes (of the ones we think might be part
1744 # Figure out which manifest nodes (of the ones we think might be part
1744 # of the changegroup) the recipient must know about and remove them
1745 # of the changegroup) the recipient must know about and remove them
1745 # from the changegroup.
1746 # from the changegroup.
1746 def prune_manifests():
1747 def prune_manifests():
1747 has_mnfst_set = set()
1748 has_mnfst_set = set()
1748 for n in msng_mnfst_set:
1749 for n in msng_mnfst_set:
1749 # If a 'missing' manifest thinks it belongs to a changenode
1750 # If a 'missing' manifest thinks it belongs to a changenode
1750 # the recipient is assumed to have, obviously the recipient
1751 # the recipient is assumed to have, obviously the recipient
1751 # must have that manifest.
1752 # must have that manifest.
1752 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1753 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1753 if linknode in has_cl_set:
1754 if linknode in has_cl_set:
1754 has_mnfst_set.add(n)
1755 has_mnfst_set.add(n)
1755 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1756 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1756
1757
1757 # Use the information collected in collect_manifests_and_files to say
1758 # Use the information collected in collect_manifests_and_files to say
1758 # which changenode any manifestnode belongs to.
1759 # which changenode any manifestnode belongs to.
1759 def lookup_manifest_link(mnfstnode):
1760 def lookup_manifest_link(mnfstnode):
1760 return msng_mnfst_set[mnfstnode]
1761 return msng_mnfst_set[mnfstnode]
1761
1762
1762 # A function generating function that sets up the initial environment
1763 # A function generating function that sets up the initial environment
1763 # the inner function.
1764 # the inner function.
1764 def filenode_collector(changedfiles):
1765 def filenode_collector(changedfiles):
1765 # This gathers information from each manifestnode included in the
1766 # This gathers information from each manifestnode included in the
1766 # changegroup about which filenodes the manifest node references
1767 # changegroup about which filenodes the manifest node references
1767 # so we can include those in the changegroup too.
1768 # so we can include those in the changegroup too.
1768 #
1769 #
1769 # It also remembers which changenode each filenode belongs to. It
1770 # It also remembers which changenode each filenode belongs to. It
1770 # does this by assuming the a filenode belongs to the changenode
1771 # does this by assuming the a filenode belongs to the changenode
1771 # the first manifest that references it belongs to.
1772 # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each file/filenode pair in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes
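
        # Editor's illustration (hedged sketch, not part of the original
        # module): the fast path above works because readdelta() returns only
        # the manifest entries that changed relative to the previous revision,
        # so its entries agree with the full manifest for those files:
        #
        #     delta = mnfst.readdelta(mnfstnode)   # changed entries only
        #     full = mnfst.read(mnfstnode)         # every entry
        #     assert all(full[f] == fnode
        #                for f, fnode in delta.iteritems() if f in full)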

        # We have a list of filenodes we think we need for a file; let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
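
        # Editor's illustration (hedged sketch): the per-file closure lets the
        # revlog group generator map each filenode back to the changenode it
        # is labelled with in the stream; the file name below is hypothetical:
        #
        #     lookup = lookup_filenode_link_func('foo/bar.txt')
        #     clnode = lookup(fnode)   # changenode owning this filenode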

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
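
        # Editor's note (hedged summary of the generator above): the emitted
        # stream is laid out as
        #
        #     changelog group
        #     manifest group
        #     for each changed file:
        #         chunkheader(len(fname)), fname, filelog group
        #     closechunk()
        #
        # which is the order addchangegroup() consumes on the receiving side.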

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
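
    # Editor's illustration (hedged sketch): callers read the returned
    # chunkbuffer like a file; ``outfp`` and the 4096-byte read size are
    # assumptions for illustration:
    #
    #     cg = repo.changegroupsubset(bases, heads, 'push')
    #     while True:
    #         data = cg.read(4096)
    #         if not data:
    #             break
    #         outfp.write(data)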

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)
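
        # Editor's illustration (hedged sketch, hypothetical values): with
        # revset == set([5, 6]), gennodelst() keeps exactly the revlog
        # revisions those changesets introduced:
        #
        #     wanted = [log.node(r) for r in log if log.linkrev(r) in revset]
        #     assert list(gennodelst(log)) == wanted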

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink
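
        # Editor's illustration (hedged sketch): linkrev maps a revlog
        # revision to the changelog revision that introduced it, so the
        # closure above resolves a manifest or file node to its changenode:
        #
        #     lookup = lookuprevlink_func(self.manifest)
        #     clnode = lookup(mnode)   # changenode that introduced mnode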

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
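
    # Editor's illustration (hedged sketch): how a caller might decode
    # addchangegroup()'s head-count convention; ``repo``, ``fp`` and ``url``
    # are assumptions for illustration:
    #
    #     ret = repo.addchangegroup(fp, 'pull', url)
    #     if ret == 0:
    #         print 'nothing to add'
    #     elif ret > 1:
    #         print '%d new heads' % (ret - 1)
    #     elif ret < 0:
    #         print '%d heads removed' % (-1 - ret)
    #     else:  # ret == 1
    #         print 'added changesets, head count unchanged'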


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
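
    # Editor's note (hedged summary of the wire format consumed above): the
    # stream_out response, as this method parses it, is
    #
    #     <status>\n                    0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     then, per file:
    #         <name>\0<size>\n followed by exactly <size> raw bytes
    #
    # The trailing "+ 1" appears to mirror addchangegroup()'s convention of
    # never returning 0 for a successful operation.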

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
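
    # Editor's illustration (hedged sketch): a streaming clone silently falls
    # back to a regular pull when specific heads are requested or the server
    # lacks the 'stream' capability; ``remote`` is assumed to be a repository
    # proxy such as one returned by hg.repository():
    #
    #     repo.clone(remote, stream=True)         # stream if possible
    #     repo.clone(remote, heads=[somenode])    # always pulls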

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
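
# Editor's illustration (hedged sketch): aftertrans() builds a callback that a
# transaction can run after it finishes, performing the queued renames; the
# file pairs below are hypothetical:
#
#     after = aftertrans([('journal', 'undo'),
#                         ('journal.branch', 'undo.branch')])
#     after()   # renames journal -> undo, journal.branch -> undo.branch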

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True