##// END OF EJS Templates
localrepo: improve readability of _findtags(), readtags() (issue548)....
Greg Ward -
r9147:234a230c default
parent child Browse files
Show More
@@ -1,2200 +1,2203 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 supported = set('revlogv1 store fncache shared'.split())
22 supported = set('revlogv1 store fncache shared'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31 self.baseui = baseui
31 self.baseui = baseui
32 self.ui = baseui.copy()
32 self.ui = baseui.copy()
33
33
34 try:
34 try:
35 self.ui.readconfig(self.join("hgrc"), self.root)
35 self.ui.readconfig(self.join("hgrc"), self.root)
36 extensions.loadall(self.ui)
36 extensions.loadall(self.ui)
37 except IOError:
37 except IOError:
38 pass
38 pass
39
39
40 if not os.path.isdir(self.path):
40 if not os.path.isdir(self.path):
41 if create:
41 if create:
42 if not os.path.exists(path):
42 if not os.path.exists(path):
43 os.mkdir(path)
43 os.mkdir(path)
44 os.mkdir(self.path)
44 os.mkdir(self.path)
45 requirements = ["revlogv1"]
45 requirements = ["revlogv1"]
46 if self.ui.configbool('format', 'usestore', True):
46 if self.ui.configbool('format', 'usestore', True):
47 os.mkdir(os.path.join(self.path, "store"))
47 os.mkdir(os.path.join(self.path, "store"))
48 requirements.append("store")
48 requirements.append("store")
49 if self.ui.configbool('format', 'usefncache', True):
49 if self.ui.configbool('format', 'usefncache', True):
50 requirements.append("fncache")
50 requirements.append("fncache")
51 # create an invalid changelog
51 # create an invalid changelog
52 self.opener("00changelog.i", "a").write(
52 self.opener("00changelog.i", "a").write(
53 '\0\0\0\2' # represents revlogv2
53 '\0\0\0\2' # represents revlogv2
54 ' dummy changelog to prevent using the old repo layout'
54 ' dummy changelog to prevent using the old repo layout'
55 )
55 )
56 reqfile = self.opener("requires", "w")
56 reqfile = self.opener("requires", "w")
57 for r in requirements:
57 for r in requirements:
58 reqfile.write("%s\n" % r)
58 reqfile.write("%s\n" % r)
59 reqfile.close()
59 reqfile.close()
60 else:
60 else:
61 raise error.RepoError(_("repository %s not found") % path)
61 raise error.RepoError(_("repository %s not found") % path)
62 elif create:
62 elif create:
63 raise error.RepoError(_("repository %s already exists") % path)
63 raise error.RepoError(_("repository %s already exists") % path)
64 else:
64 else:
65 # find requirements
65 # find requirements
66 requirements = set()
66 requirements = set()
67 try:
67 try:
68 requirements = set(self.opener("requires").read().splitlines())
68 requirements = set(self.opener("requires").read().splitlines())
69 except IOError, inst:
69 except IOError, inst:
70 if inst.errno != errno.ENOENT:
70 if inst.errno != errno.ENOENT:
71 raise
71 raise
72 for r in requirements - self.supported:
72 for r in requirements - self.supported:
73 raise error.RepoError(_("requirement '%s' not supported") % r)
73 raise error.RepoError(_("requirement '%s' not supported") % r)
74
74
75 self.sharedpath = self.path
75 self.sharedpath = self.path
76 try:
76 try:
77 s = os.path.realpath(self.opener("sharedpath").read())
77 s = os.path.realpath(self.opener("sharedpath").read())
78 if not os.path.exists(s):
78 if not os.path.exists(s):
79 raise error.RepoError(
79 raise error.RepoError(
80 _('.hg/sharedpath points to nonexistent directory %s') % s)
80 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 self.sharedpath = s
81 self.sharedpath = s
82 except IOError, inst:
82 except IOError, inst:
83 if inst.errno != errno.ENOENT:
83 if inst.errno != errno.ENOENT:
84 raise
84 raise
85
85
86 self.store = store.store(requirements, self.sharedpath, util.opener)
86 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.spath = self.store.path
87 self.spath = self.store.path
88 self.sopener = self.store.opener
88 self.sopener = self.store.opener
89 self.sjoin = self.store.join
89 self.sjoin = self.store.join
90 self.opener.createmode = self.store.createmode
90 self.opener.createmode = self.store.createmode
91
91
92 # These two define the set of tags for this repository. _tags
92 # These two define the set of tags for this repository. _tags
93 # maps tag name to node; _tagtypes maps tag name to 'global' or
93 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # 'local'. (Global tags are defined by .hgtags across all
94 # 'local'. (Global tags are defined by .hgtags across all
95 # heads, and local tags are defined in .hg/localtags.) They
95 # heads, and local tags are defined in .hg/localtags.) They
96 # constitute the in-memory cache of tags.
96 # constitute the in-memory cache of tags.
97 self._tags = None
97 self._tags = None
98 self._tagtypes = None
98 self._tagtypes = None
99
99
100 self.branchcache = None
100 self.branchcache = None
101 self._ubranchcache = None # UTF-8 version of branchcache
101 self._ubranchcache = None # UTF-8 version of branchcache
102 self._branchcachetip = None
102 self._branchcachetip = None
103 self.nodetagscache = None
103 self.nodetagscache = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 @propertycache
108 @propertycache
109 def changelog(self):
109 def changelog(self):
110 c = changelog.changelog(self.sopener)
110 c = changelog.changelog(self.sopener)
111 if 'HG_PENDING' in os.environ:
111 if 'HG_PENDING' in os.environ:
112 p = os.environ['HG_PENDING']
112 p = os.environ['HG_PENDING']
113 if p.startswith(self.root):
113 if p.startswith(self.root):
114 c.readpending('00changelog.i.a')
114 c.readpending('00changelog.i.a')
115 self.sopener.defversion = c.version
115 self.sopener.defversion = c.version
116 return c
116 return c
117
117
118 @propertycache
118 @propertycache
119 def manifest(self):
119 def manifest(self):
120 return manifest.manifest(self.sopener)
120 return manifest.manifest(self.sopener)
121
121
122 @propertycache
122 @propertycache
123 def dirstate(self):
123 def dirstate(self):
124 return dirstate.dirstate(self.opener, self.ui, self.root)
124 return dirstate.dirstate(self.opener, self.ui, self.root)
125
125
126 def __getitem__(self, changeid):
126 def __getitem__(self, changeid):
127 if changeid is None:
127 if changeid is None:
128 return context.workingctx(self)
128 return context.workingctx(self)
129 return context.changectx(self, changeid)
129 return context.changectx(self, changeid)
130
130
131 def __nonzero__(self):
131 def __nonzero__(self):
132 return True
132 return True
133
133
134 def __len__(self):
134 def __len__(self):
135 return len(self.changelog)
135 return len(self.changelog)
136
136
137 def __iter__(self):
137 def __iter__(self):
138 for i in xrange(len(self)):
138 for i in xrange(len(self)):
139 yield i
139 yield i
140
140
141 def url(self):
141 def url(self):
142 return 'file:' + self.root
142 return 'file:' + self.root
143
143
144 def hook(self, name, throw=False, **args):
144 def hook(self, name, throw=False, **args):
145 return hook.hook(self.ui, self, name, throw, **args)
145 return hook.hook(self.ui, self, name, throw, **args)
146
146
147 tag_disallowed = ':\r\n'
147 tag_disallowed = ':\r\n'
148
148
149 def _tag(self, names, node, message, local, user, date, extra={}):
149 def _tag(self, names, node, message, local, user, date, extra={}):
150 if isinstance(names, str):
150 if isinstance(names, str):
151 allchars = names
151 allchars = names
152 names = (names,)
152 names = (names,)
153 else:
153 else:
154 allchars = ''.join(names)
154 allchars = ''.join(names)
155 for c in self.tag_disallowed:
155 for c in self.tag_disallowed:
156 if c in allchars:
156 if c in allchars:
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
158
158
159 for name in names:
159 for name in names:
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 local=local)
161 local=local)
162
162
163 def writetags(fp, names, munge, prevtags):
163 def writetags(fp, names, munge, prevtags):
164 fp.seek(0, 2)
164 fp.seek(0, 2)
165 if prevtags and prevtags[-1] != '\n':
165 if prevtags and prevtags[-1] != '\n':
166 fp.write('\n')
166 fp.write('\n')
167 for name in names:
167 for name in names:
168 m = munge and munge(name) or name
168 m = munge and munge(name) or name
169 if self._tagtypes and name in self._tagtypes:
169 if self._tagtypes and name in self._tagtypes:
170 old = self._tags.get(name, nullid)
170 old = self._tags.get(name, nullid)
171 fp.write('%s %s\n' % (hex(old), m))
171 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(node), m))
172 fp.write('%s %s\n' % (hex(node), m))
173 fp.close()
173 fp.close()
174
174
175 prevtags = ''
175 prevtags = ''
176 if local:
176 if local:
177 try:
177 try:
178 fp = self.opener('localtags', 'r+')
178 fp = self.opener('localtags', 'r+')
179 except IOError:
179 except IOError:
180 fp = self.opener('localtags', 'a')
180 fp = self.opener('localtags', 'a')
181 else:
181 else:
182 prevtags = fp.read()
182 prevtags = fp.read()
183
183
184 # local tags are stored in the current charset
184 # local tags are stored in the current charset
185 writetags(fp, names, None, prevtags)
185 writetags(fp, names, None, prevtags)
186 for name in names:
186 for name in names:
187 self.hook('tag', node=hex(node), tag=name, local=local)
187 self.hook('tag', node=hex(node), tag=name, local=local)
188 return
188 return
189
189
190 try:
190 try:
191 fp = self.wfile('.hgtags', 'rb+')
191 fp = self.wfile('.hgtags', 'rb+')
192 except IOError:
192 except IOError:
193 fp = self.wfile('.hgtags', 'ab')
193 fp = self.wfile('.hgtags', 'ab')
194 else:
194 else:
195 prevtags = fp.read()
195 prevtags = fp.read()
196
196
197 # committed tags are stored in UTF-8
197 # committed tags are stored in UTF-8
198 writetags(fp, names, encoding.fromlocal, prevtags)
198 writetags(fp, names, encoding.fromlocal, prevtags)
199
199
200 if '.hgtags' not in self.dirstate:
200 if '.hgtags' not in self.dirstate:
201 self.add(['.hgtags'])
201 self.add(['.hgtags'])
202
202
203 m = match_.exact(self.root, '', ['.hgtags'])
203 m = match_.exact(self.root, '', ['.hgtags'])
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
205
205
206 for name in names:
206 for name in names:
207 self.hook('tag', node=hex(node), tag=name, local=local)
207 self.hook('tag', node=hex(node), tag=name, local=local)
208
208
209 return tagnode
209 return tagnode
210
210
211 def tag(self, names, node, message, local, user, date):
211 def tag(self, names, node, message, local, user, date):
212 '''tag a revision with one or more symbolic names.
212 '''tag a revision with one or more symbolic names.
213
213
214 names is a list of strings or, when adding a single tag, names may be a
214 names is a list of strings or, when adding a single tag, names may be a
215 string.
215 string.
216
216
217 if local is True, the tags are stored in a per-repository file.
217 if local is True, the tags are stored in a per-repository file.
218 otherwise, they are stored in the .hgtags file, and a new
218 otherwise, they are stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tags in non-version-controlled file
223 local: whether to store tags in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for x in self.status()[:5]:
232 for x in self.status()[:5]:
233 if '.hgtags' in x:
233 if '.hgtags' in x:
234 raise util.Abort(_('working copy of .hgtags is changed '
234 raise util.Abort(_('working copy of .hgtags is changed '
235 '(please commit .hgtags manually)'))
235 '(please commit .hgtags manually)'))
236
236
237 self.tags() # instantiate the cache
237 self.tags() # instantiate the cache
238 self._tag(names, node, message, local, user, date)
238 self._tag(names, node, message, local, user, date)
239
239
240 def tags(self):
240 def tags(self):
241 '''return a mapping of tag to node'''
241 '''return a mapping of tag to node'''
242 if self._tags is None:
242 if self._tags is None:
243 (self._tags, self._tagtypes) = self._findtags()
243 (self._tags, self._tagtypes) = self._findtags()
244
244
245 return self._tags
245 return self._tags
246
246
247 def _findtags(self):
247 def _findtags(self):
248 '''Do the hard work of finding tags. Return a pair of dicts
248 '''Do the hard work of finding tags. Return a pair of dicts
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 maps tag name to a string like \'global\' or \'local\'.
250 maps tag name to a string like \'global\' or \'local\'.
251 Subclasses or extensions are free to add their own tags, but
251 Subclasses or extensions are free to add their own tags, but
252 should be aware that the returned dicts will be retained for the
252 should be aware that the returned dicts will be retained for the
253 duration of the localrepo object.'''
253 duration of the localrepo object.'''
254
254
255 # XXX what tagtype should subclasses/extensions use? Currently
255 # XXX what tagtype should subclasses/extensions use? Currently
256 # mq and bookmarks add tags, but do not set the tagtype at all.
256 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # Should each extension invent its own tag type? Should there
257 # Should each extension invent its own tag type? Should there
258 # be one tagtype for all such "virtual" tags? Or is the status
258 # be one tagtype for all such "virtual" tags? Or is the status
259 # quo fine?
259 # quo fine?
260
260
261 globaltags = {}
261 alltags = {} # map tag name to (node, hist)
262 tagtypes = {}
262 tagtypes = {}
263
263
264 def readtags(lines, fn, tagtype):
264 def readtags(lines, fn, tagtype):
265 filetags = {}
265 filetags = {} # map tag name to (node, hist)
266 count = 0
266 count = 0
267
267
268 def warn(msg):
268 def warn(msg):
269 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
269 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
270
270
271 for l in lines:
271 for line in lines:
272 count += 1
272 count += 1
273 if not l:
273 if not line:
274 continue
274 continue
275 s = l.split(" ", 1)
275 try:
276 if len(s) != 2:
276 (nodehex, name) = line.split(" ", 1)
277 except ValueError:
277 warn(_("cannot parse entry"))
278 warn(_("cannot parse entry"))
278 continue
279 continue
279 node, key = s
280 name = encoding.tolocal(name.strip()) # stored in UTF-8
280 key = encoding.tolocal(key.strip()) # stored in UTF-8
281 try:
281 try:
282 bin_n = bin(node)
282 nodebin = bin(nodehex)
283 except TypeError:
283 except TypeError:
284 warn(_("node '%s' is not well formed") % node)
284 warn(_("node '%s' is not well formed") % nodehex)
285 continue
285 continue
286 if bin_n not in self.changelog.nodemap:
286 if nodebin not in self.changelog.nodemap:
287 # silently ignore as pull -r might cause this
287 # silently ignore as pull -r might cause this
288 continue
288 continue
289
289
290 h = []
290 # update filetags: map tag name to (node, hist) where
291 if key in filetags:
291 # node is the node from the latest line read with
292 n, h = filetags[key]
292 # 'name', and hist is the list of nodes previously
293 h.append(n)
293 # associated with 'name'
294 filetags[key] = (bin_n, h)
294 hist = []
295 if name in filetags:
296 n, hist = filetags[name]
297 hist.append(n)
298 filetags[name] = (nodebin, hist)
295
299
296 for k, nh in filetags.iteritems():
300 for name, nodehist in filetags.iteritems():
297 if k not in globaltags:
301 if name not in alltags:
298 globaltags[k] = nh
302 alltags[name] = nodehist
299 tagtypes[k] = tagtype
303 tagtypes[name] = tagtype
300 continue
304 continue
301
305
302 # we prefer the global tag if:
306 # we prefer alltags[name] if:
303 # it supercedes us OR
307 # it supercedes us OR
304 # mutual supercedes and it has a higher rank
308 # mutual supercedes and it has a higher rank
305 # otherwise we win because we're tip-most
309 # otherwise we win because we're tip-most
306 an, ah = nh
310 anode, ahist = nodehist
307 bn, bh = globaltags[k]
311 bnode, bhist = alltags[name]
308 if (bn != an and an in bh and
312 if (bnode != anode and anode in bhist and
309 (bn not in ah or len(bh) > len(ah))):
313 (bnode not in ahist or len(bhist) > len(ahist))):
310 an = bn
314 anode = bnode
311 ah.extend([n for n in bh if n not in ah])
315 ahist.extend([n for n in bhist if n not in ahist])
312 globaltags[k] = an, ah
316 alltags[name] = anode, ahist
313 tagtypes[k] = tagtype
317 tagtypes[name] = tagtype
314
318
315 seen = set()
319 seen = set()
316 f = None
320 fctx = None
317 ctxs = []
321 ctxs = [] # list of filectx
318 for node in self.heads():
322 for node in self.heads():
319 try:
323 try:
320 fnode = self[node].filenode('.hgtags')
324 fnode = self[node].filenode('.hgtags')
321 except error.LookupError:
325 except error.LookupError:
322 continue
326 continue
323 if fnode not in seen:
327 if fnode not in seen:
324 seen.add(fnode)
328 seen.add(fnode)
325 if not f:
329 if not fctx:
326 f = self.filectx('.hgtags', fileid=fnode)
330 fctx = self.filectx('.hgtags', fileid=fnode)
327 else:
331 else:
328 f = f.filectx(fnode)
332 fctx = fctx.filectx(fnode)
329 ctxs.append(f)
333 ctxs.append(fctx)
330
334
331 # read the tags file from each head, ending with the tip
335 # read the tags file from each head, ending with the tip
332 for f in reversed(ctxs):
336 for fctx in reversed(ctxs):
333 readtags(f.data().splitlines(), f, "global")
337 readtags(fctx.data().splitlines(), fctx, "global")
334
338
335 try:
339 try:
336 data = encoding.fromlocal(self.opener("localtags").read())
340 data = encoding.fromlocal(self.opener("localtags").read())
337 # localtags are stored in the local character set
341 # localtags are stored in the local character set
338 # while the internal tag table is stored in UTF-8
342 # while the internal tag table is stored in UTF-8
339 readtags(data.splitlines(), "localtags", "local")
343 readtags(data.splitlines(), "localtags", "local")
340 except IOError:
344 except IOError:
341 pass
345 pass
342
346
343 tags = {}
347 tags = {}
344 for k, nh in globaltags.iteritems():
348 for (name, (node, hist)) in alltags.iteritems():
345 n = nh[0]
349 if node != nullid:
346 if n != nullid:
350 tags[name] = node
347 tags[k] = n
348 tags['tip'] = self.changelog.tip()
351 tags['tip'] = self.changelog.tip()
349 return (tags, tagtypes)
352 return (tags, tagtypes)
350
353
351 def tagtype(self, tagname):
354 def tagtype(self, tagname):
352 '''
355 '''
353 return the type of the given tag. result can be:
356 return the type of the given tag. result can be:
354
357
355 'local' : a local tag
358 'local' : a local tag
356 'global' : a global tag
359 'global' : a global tag
357 None : tag does not exist
360 None : tag does not exist
358 '''
361 '''
359
362
360 self.tags()
363 self.tags()
361
364
362 return self._tagtypes.get(tagname)
365 return self._tagtypes.get(tagname)
363
366
364 def tagslist(self):
367 def tagslist(self):
365 '''return a list of tags ordered by revision'''
368 '''return a list of tags ordered by revision'''
366 l = []
369 l = []
367 for t, n in self.tags().iteritems():
370 for t, n in self.tags().iteritems():
368 try:
371 try:
369 r = self.changelog.rev(n)
372 r = self.changelog.rev(n)
370 except:
373 except:
371 r = -2 # sort to the beginning of the list if unknown
374 r = -2 # sort to the beginning of the list if unknown
372 l.append((r, t, n))
375 l.append((r, t, n))
373 return [(t, n) for r, t, n in sorted(l)]
376 return [(t, n) for r, t, n in sorted(l)]
374
377
375 def nodetags(self, node):
378 def nodetags(self, node):
376 '''return the tags associated with a node'''
379 '''return the tags associated with a node'''
377 if not self.nodetagscache:
380 if not self.nodetagscache:
378 self.nodetagscache = {}
381 self.nodetagscache = {}
379 for t, n in self.tags().iteritems():
382 for t, n in self.tags().iteritems():
380 self.nodetagscache.setdefault(n, []).append(t)
383 self.nodetagscache.setdefault(n, []).append(t)
381 return self.nodetagscache.get(node, [])
384 return self.nodetagscache.get(node, [])
382
385
383 def _branchtags(self, partial, lrev):
386 def _branchtags(self, partial, lrev):
384 # TODO: rename this function?
387 # TODO: rename this function?
385 tiprev = len(self) - 1
388 tiprev = len(self) - 1
386 if lrev != tiprev:
389 if lrev != tiprev:
387 self._updatebranchcache(partial, lrev+1, tiprev+1)
390 self._updatebranchcache(partial, lrev+1, tiprev+1)
388 self._writebranchcache(partial, self.changelog.tip(), tiprev)
391 self._writebranchcache(partial, self.changelog.tip(), tiprev)
389
392
390 return partial
393 return partial
391
394
392 def branchmap(self):
395 def branchmap(self):
393 tip = self.changelog.tip()
396 tip = self.changelog.tip()
394 if self.branchcache is not None and self._branchcachetip == tip:
397 if self.branchcache is not None and self._branchcachetip == tip:
395 return self.branchcache
398 return self.branchcache
396
399
397 oldtip = self._branchcachetip
400 oldtip = self._branchcachetip
398 self._branchcachetip = tip
401 self._branchcachetip = tip
399 if self.branchcache is None:
402 if self.branchcache is None:
400 self.branchcache = {} # avoid recursion in changectx
403 self.branchcache = {} # avoid recursion in changectx
401 else:
404 else:
402 self.branchcache.clear() # keep using the same dict
405 self.branchcache.clear() # keep using the same dict
403 if oldtip is None or oldtip not in self.changelog.nodemap:
406 if oldtip is None or oldtip not in self.changelog.nodemap:
404 partial, last, lrev = self._readbranchcache()
407 partial, last, lrev = self._readbranchcache()
405 else:
408 else:
406 lrev = self.changelog.rev(oldtip)
409 lrev = self.changelog.rev(oldtip)
407 partial = self._ubranchcache
410 partial = self._ubranchcache
408
411
409 self._branchtags(partial, lrev)
412 self._branchtags(partial, lrev)
410 # this private cache holds all heads (not just tips)
413 # this private cache holds all heads (not just tips)
411 self._ubranchcache = partial
414 self._ubranchcache = partial
412
415
413 # the branch cache is stored on disk as UTF-8, but in the local
416 # the branch cache is stored on disk as UTF-8, but in the local
414 # charset internally
417 # charset internally
415 for k, v in partial.iteritems():
418 for k, v in partial.iteritems():
416 self.branchcache[encoding.tolocal(k)] = v
419 self.branchcache[encoding.tolocal(k)] = v
417 return self.branchcache
420 return self.branchcache
418
421
419
422
420 def branchtags(self):
423 def branchtags(self):
421 '''return a dict where branch names map to the tipmost head of
424 '''return a dict where branch names map to the tipmost head of
422 the branch, open heads come before closed'''
425 the branch, open heads come before closed'''
423 bt = {}
426 bt = {}
424 for bn, heads in self.branchmap().iteritems():
427 for bn, heads in self.branchmap().iteritems():
425 head = None
428 head = None
426 for i in range(len(heads)-1, -1, -1):
429 for i in range(len(heads)-1, -1, -1):
427 h = heads[i]
430 h = heads[i]
428 if 'close' not in self.changelog.read(h)[5]:
431 if 'close' not in self.changelog.read(h)[5]:
429 head = h
432 head = h
430 break
433 break
431 # no open heads were found
434 # no open heads were found
432 if head is None:
435 if head is None:
433 head = heads[-1]
436 head = heads[-1]
434 bt[bn] = head
437 bt[bn] = head
435 return bt
438 return bt
436
439
437
440
438 def _readbranchcache(self):
441 def _readbranchcache(self):
439 partial = {}
442 partial = {}
440 try:
443 try:
441 f = self.opener("branchheads.cache")
444 f = self.opener("branchheads.cache")
442 lines = f.read().split('\n')
445 lines = f.read().split('\n')
443 f.close()
446 f.close()
444 except (IOError, OSError):
447 except (IOError, OSError):
445 return {}, nullid, nullrev
448 return {}, nullid, nullrev
446
449
447 try:
450 try:
448 last, lrev = lines.pop(0).split(" ", 1)
451 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = bin(last), int(lrev)
452 last, lrev = bin(last), int(lrev)
450 if lrev >= len(self) or self[lrev].node() != last:
453 if lrev >= len(self) or self[lrev].node() != last:
451 # invalidate the cache
454 # invalidate the cache
452 raise ValueError('invalidating branch cache (tip differs)')
455 raise ValueError('invalidating branch cache (tip differs)')
453 for l in lines:
456 for l in lines:
454 if not l: continue
457 if not l: continue
455 node, label = l.split(" ", 1)
458 node, label = l.split(" ", 1)
456 partial.setdefault(label.strip(), []).append(bin(node))
459 partial.setdefault(label.strip(), []).append(bin(node))
457 except KeyboardInterrupt:
460 except KeyboardInterrupt:
458 raise
461 raise
459 except Exception, inst:
462 except Exception, inst:
460 if self.ui.debugflag:
463 if self.ui.debugflag:
461 self.ui.warn(str(inst), '\n')
464 self.ui.warn(str(inst), '\n')
462 partial, last, lrev = {}, nullid, nullrev
465 partial, last, lrev = {}, nullid, nullrev
463 return partial, last, lrev
466 return partial, last, lrev
464
467
465 def _writebranchcache(self, branches, tip, tiprev):
468 def _writebranchcache(self, branches, tip, tiprev):
466 try:
469 try:
467 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 f = self.opener("branchheads.cache", "w", atomictemp=True)
468 f.write("%s %s\n" % (hex(tip), tiprev))
471 f.write("%s %s\n" % (hex(tip), tiprev))
469 for label, nodes in branches.iteritems():
472 for label, nodes in branches.iteritems():
470 for node in nodes:
473 for node in nodes:
471 f.write("%s %s\n" % (hex(node), label))
474 f.write("%s %s\n" % (hex(node), label))
472 f.rename()
475 f.rename()
473 except (IOError, OSError):
476 except (IOError, OSError):
474 pass
477 pass
475
478
476 def _updatebranchcache(self, partial, start, end):
479 def _updatebranchcache(self, partial, start, end):
477 # collect new branch entries
480 # collect new branch entries
478 newbranches = {}
481 newbranches = {}
479 for r in xrange(start, end):
482 for r in xrange(start, end):
480 c = self[r]
483 c = self[r]
481 newbranches.setdefault(c.branch(), []).append(c.node())
484 newbranches.setdefault(c.branch(), []).append(c.node())
482 # if older branchheads are reachable from new ones, they aren't
485 # if older branchheads are reachable from new ones, they aren't
483 # really branchheads. Note checking parents is insufficient:
486 # really branchheads. Note checking parents is insufficient:
484 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
485 for branch, newnodes in newbranches.iteritems():
488 for branch, newnodes in newbranches.iteritems():
486 bheads = partial.setdefault(branch, [])
489 bheads = partial.setdefault(branch, [])
487 bheads.extend(newnodes)
490 bheads.extend(newnodes)
488 if len(bheads) < 2:
491 if len(bheads) < 2:
489 continue
492 continue
490 newbheads = []
493 newbheads = []
491 # starting from tip means fewer passes over reachable
494 # starting from tip means fewer passes over reachable
492 while newnodes:
495 while newnodes:
493 latest = newnodes.pop()
496 latest = newnodes.pop()
494 if latest not in bheads:
497 if latest not in bheads:
495 continue
498 continue
496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
499 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
497 reachable = self.changelog.reachable(latest, minbhrev)
500 reachable = self.changelog.reachable(latest, minbhrev)
498 bheads = [b for b in bheads if b not in reachable]
501 bheads = [b for b in bheads if b not in reachable]
499 newbheads.insert(0, latest)
502 newbheads.insert(0, latest)
500 bheads.extend(newbheads)
503 bheads.extend(newbheads)
501 partial[branch] = bheads
504 partial[branch] = bheads
502
505
503 def lookup(self, key):
506 def lookup(self, key):
504 if isinstance(key, int):
507 if isinstance(key, int):
505 return self.changelog.node(key)
508 return self.changelog.node(key)
506 elif key == '.':
509 elif key == '.':
507 return self.dirstate.parents()[0]
510 return self.dirstate.parents()[0]
508 elif key == 'null':
511 elif key == 'null':
509 return nullid
512 return nullid
510 elif key == 'tip':
513 elif key == 'tip':
511 return self.changelog.tip()
514 return self.changelog.tip()
512 n = self.changelog._match(key)
515 n = self.changelog._match(key)
513 if n:
516 if n:
514 return n
517 return n
515 if key in self.tags():
518 if key in self.tags():
516 return self.tags()[key]
519 return self.tags()[key]
517 if key in self.branchtags():
520 if key in self.branchtags():
518 return self.branchtags()[key]
521 return self.branchtags()[key]
519 n = self.changelog._partialmatch(key)
522 n = self.changelog._partialmatch(key)
520 if n:
523 if n:
521 return n
524 return n
522
525
523 # can't find key, check if it might have come from damaged dirstate
526 # can't find key, check if it might have come from damaged dirstate
524 if key in self.dirstate.parents():
527 if key in self.dirstate.parents():
525 raise error.Abort(_("working directory has unknown parent '%s'!")
528 raise error.Abort(_("working directory has unknown parent '%s'!")
526 % short(key))
529 % short(key))
527 try:
530 try:
528 if len(key) == 20:
531 if len(key) == 20:
529 key = hex(key)
532 key = hex(key)
530 except:
533 except:
531 pass
534 pass
532 raise error.RepoError(_("unknown revision '%s'") % key)
535 raise error.RepoError(_("unknown revision '%s'") % key)
533
536
534 def local(self):
537 def local(self):
535 return True
538 return True
536
539
537 def join(self, f):
540 def join(self, f):
538 return os.path.join(self.path, f)
541 return os.path.join(self.path, f)
539
542
540 def wjoin(self, f):
543 def wjoin(self, f):
541 return os.path.join(self.root, f)
544 return os.path.join(self.root, f)
542
545
543 def rjoin(self, f):
546 def rjoin(self, f):
544 return os.path.join(self.root, util.pconvert(f))
547 return os.path.join(self.root, util.pconvert(f))
545
548
546 def file(self, f):
549 def file(self, f):
547 if f[0] == '/':
550 if f[0] == '/':
548 f = f[1:]
551 f = f[1:]
549 return filelog.filelog(self.sopener, f)
552 return filelog.filelog(self.sopener, f)
550
553
551 def changectx(self, changeid):
554 def changectx(self, changeid):
552 return self[changeid]
555 return self[changeid]
553
556
554 def parents(self, changeid=None):
557 def parents(self, changeid=None):
555 '''get list of changectxs for parents of changeid'''
558 '''get list of changectxs for parents of changeid'''
556 return self[changeid].parents()
559 return self[changeid].parents()
557
560
558 def filectx(self, path, changeid=None, fileid=None):
561 def filectx(self, path, changeid=None, fileid=None):
559 """changeid can be a changeset revision, node, or tag.
562 """changeid can be a changeset revision, node, or tag.
560 fileid can be a file revision or node."""
563 fileid can be a file revision or node."""
561 return context.filectx(self, path, changeid, fileid)
564 return context.filectx(self, path, changeid, fileid)
562
565
563 def getcwd(self):
566 def getcwd(self):
564 return self.dirstate.getcwd()
567 return self.dirstate.getcwd()
565
568
566 def pathto(self, f, cwd=None):
569 def pathto(self, f, cwd=None):
567 return self.dirstate.pathto(f, cwd)
570 return self.dirstate.pathto(f, cwd)
568
571
569 def wfile(self, f, mode='r'):
572 def wfile(self, f, mode='r'):
570 return self.wopener(f, mode)
573 return self.wopener(f, mode)
571
574
572 def _link(self, f):
575 def _link(self, f):
573 return os.path.islink(self.wjoin(f))
576 return os.path.islink(self.wjoin(f))
574
577
575 def _filter(self, filter, filename, data):
578 def _filter(self, filter, filename, data):
576 if filter not in self.filterpats:
579 if filter not in self.filterpats:
577 l = []
580 l = []
578 for pat, cmd in self.ui.configitems(filter):
581 for pat, cmd in self.ui.configitems(filter):
579 if cmd == '!':
582 if cmd == '!':
580 continue
583 continue
581 mf = match_.match(self.root, '', [pat])
584 mf = match_.match(self.root, '', [pat])
582 fn = None
585 fn = None
583 params = cmd
586 params = cmd
584 for name, filterfn in self._datafilters.iteritems():
587 for name, filterfn in self._datafilters.iteritems():
585 if cmd.startswith(name):
588 if cmd.startswith(name):
586 fn = filterfn
589 fn = filterfn
587 params = cmd[len(name):].lstrip()
590 params = cmd[len(name):].lstrip()
588 break
591 break
589 if not fn:
592 if not fn:
590 fn = lambda s, c, **kwargs: util.filter(s, c)
593 fn = lambda s, c, **kwargs: util.filter(s, c)
591 # Wrap old filters not supporting keyword arguments
594 # Wrap old filters not supporting keyword arguments
592 if not inspect.getargspec(fn)[2]:
595 if not inspect.getargspec(fn)[2]:
593 oldfn = fn
596 oldfn = fn
594 fn = lambda s, c, **kwargs: oldfn(s, c)
597 fn = lambda s, c, **kwargs: oldfn(s, c)
595 l.append((mf, fn, params))
598 l.append((mf, fn, params))
596 self.filterpats[filter] = l
599 self.filterpats[filter] = l
597
600
598 for mf, fn, cmd in self.filterpats[filter]:
601 for mf, fn, cmd in self.filterpats[filter]:
599 if mf(filename):
602 if mf(filename):
600 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
603 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
601 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
604 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
602 break
605 break
603
606
604 return data
607 return data
605
608
606 def adddatafilter(self, name, filter):
609 def adddatafilter(self, name, filter):
607 self._datafilters[name] = filter
610 self._datafilters[name] = filter
608
611
609 def wread(self, filename):
612 def wread(self, filename):
610 if self._link(filename):
613 if self._link(filename):
611 data = os.readlink(self.wjoin(filename))
614 data = os.readlink(self.wjoin(filename))
612 else:
615 else:
613 data = self.wopener(filename, 'r').read()
616 data = self.wopener(filename, 'r').read()
614 return self._filter("encode", filename, data)
617 return self._filter("encode", filename, data)
615
618
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory.

        data is first run through the 'decode' filters.  flags: 'l'
        writes a symlink whose target is data; 'x' additionally sets
        the executable bit on a regular file.
        """
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file first (best-effort; presumably so a
            # symlink/regular-file switch replaces rather than follows the
            # old entry -- TODO confirm)
            os.unlink(self.wjoin(filename))
        except OSError:
            # file didn't exist (or couldn't be removed); proceed to write
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
628
631
629 def wwritedata(self, filename, data):
632 def wwritedata(self, filename, data):
630 return self._filter("decode", filename, data)
633 return self._filter("decode", filename, data)
631
634
    def transaction(self):
        """Return a transaction on the store, nesting in a running one.

        Refuses to start if a stale journal file exists (the user must
        run 'hg recover').  The current dirstate and branch are copied
        to journal.* files so rollback can restore them; when the
        transaction closes, aftertrans() renames journal.* to undo.*.
        Only a weak reference to the transaction is kept.
        """
        # reuse a transaction that is already in progress
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo); save an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on close, the journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
658
661
    def recover(self):
        """Replay an interrupted transaction's journal, if present.

        Takes the store lock.  Returns True if a journal was rolled
        back, False (with a warning) if there was nothing to recover.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                # caches may describe rolled-back state; drop them
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
672
675
    def rollback(self):
        """Undo the last committed transaction.

        Takes both the working-directory lock and the store lock,
        replays the undo journal, and restores the saved dirstate and
        branch.  Warns and does nothing if no undo information exists.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing/unreadable; leave branch as-is
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop caches and force the dirstate to be re-read
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
695
698
696 def invalidate(self):
699 def invalidate(self):
697 for a in "changelog manifest".split():
700 for a in "changelog manifest".split():
698 if a in self.__dict__:
701 if a in self.__dict__:
699 delattr(self, a)
702 delattr(self, a)
700 self._tags = None
703 self._tags = None
701 self._tagtypes = None
704 self._tagtypes = None
702 self.nodetagscache = None
705 self.nodetagscache = None
703 self.branchcache = None
706 self.branchcache = None
704 self._ubranchcache = None
707 self._ubranchcache = None
705 self._branchcachetip = None
708 self._branchcachetip = None
706
709
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire and return the lock file at lockname.

        First tries a non-blocking acquire.  If the lock is held and
        wait is true, warns who holds it and retries with the
        ui.timeout config value (default 600 seconds); if wait is
        false, the LockHeld error propagates.  acquirefn, if given, is
        called after the lock is obtained.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
721
724
722 def lock(self, wait=True):
725 def lock(self, wait=True):
723 l = self._lockref and self._lockref()
726 l = self._lockref and self._lockref()
724 if l is not None and l.held:
727 if l is not None and l.held:
725 l.lock()
728 l.lock()
726 return l
729 return l
727
730
728 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
731 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
729 _('repository %s') % self.origroot)
732 _('repository %s') % self.origroot)
730 self._lockref = weakref.ref(l)
733 self._lockref = weakref.ref(l)
731 return l
734 return l
732
735
733 def wlock(self, wait=True):
736 def wlock(self, wait=True):
734 l = self._wlockref and self._wlockref()
737 l = self._wlockref and self._wlockref()
735 if l is not None and l.held:
738 if l is not None and l.held:
736 l.lock()
739 l.lock()
737 return l
740 return l
738
741
739 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
742 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
740 self.dirstate.invalidate, _('working directory of %s') %
743 self.dirstate.invalidate, _('working directory of %s') %
741 self.origroot)
744 self.origroot)
742 self._wlockref = weakref.ref(l)
745 self._wlockref = weakref.ref(l)
743 return l
746 return l
744
747
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        manifests of the commit's parents; linkrev is the changelog
        revision the new filelog entry will link to.  Appends fname to
        changelist when a new filelog revision (or a flags-only change)
        is recorded.  Returns the new filelog node, or the first parent
        node when nothing changed.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        # file nodes in each parent manifest (nullid = absent)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        # (fparent2o is the original, pre-adjustment second parent)
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
819
822
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.  Raises util.Abort on a partial merge commit, an
        unmatched explicit file, or unresolved merge conflicts.
        NOTE(review): extra={} is a mutable default argument; callers
        must not rely on it being fresh per call.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            # record visited directories and turn bad files into aborts
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            # a merge (p2 != nullid) must commit everything
            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes is the status() 7-tuple:
            # (modified, added, removed, deleted, unknown, ignored, clean)
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (unless closing a branch head or the
            # branch name changed)
            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs (before the main commit, so .hgsubstate is current)
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
920
923
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        Fires the precommit/pretxncommit/commit hooks, writes the new
        filelog, manifest and changelog entries inside one transaction,
        and returns the new changelog node.  When error is true, an
        OSError/IOError while committing a file is fatal; otherwise the
        file is treated as removed.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        # start from the first parent's manifest and apply this commit
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            # hand out only a weak proxy so the transaction can be
            # collected once we drop our reference in the finally block
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog; writes are delayed so pretxncommit hooks
            # can see the pending data before it is finalized
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            # keep the branch cache up to date
            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()
988
991
989 def walk(self, match, node=None):
992 def walk(self, match, node=None):
990 '''
993 '''
991 walk recursively through the directory tree or a given
994 walk recursively through the directory tree or a given
992 changeset, finding all files matched by the match
995 changeset, finding all files matched by the match
993 function
996 function
994 '''
997 '''
995 return self[node].walk(match)
998 return self[node].walk(match)
996
999
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown parameters only control whether those
        lists are populated; otherwise they come back empty.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        # the ignored/clean/unknown parameters are flags; rename them so
        # the names below can hold the actual file lists
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn for files that really don't exist in ctx1
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            # cmp holds files the dirstate could not decide about
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # classify every file in mf2 against mf1; whatever is left
            # in mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    # modified if flags differ, or nodes differ and either
                    # mf2's node is real (not the working-dir None) or the
                    # contents actually differ
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1102
1105
def add(self, list):
    """Schedule the files in `list` for addition to the dirstate.

    Returns the list of rejected files (nonexistent paths or
    unsupported file types).  Very large files and already-tracked
    files only produce a warning; they are not rejected.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                # narrowed from a bare "except:": only a failed stat
                # means the file is missing; anything else (e.g.
                # KeyboardInterrupt) should propagate
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                # NOTE(review): this appends the full path (p) while the
                # stat-failure branch above appends the repo-relative
                # name (f); kept as-is to preserve caller-visible
                # behavior -- confirm whether this asymmetry is intended
                rejected.append(p)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                # previously removed: resurrect instead of re-adding
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        wlock.release()
1134 def forget(self, list):
1137 def forget(self, list):
1135 wlock = self.wlock()
1138 wlock = self.wlock()
1136 try:
1139 try:
1137 for f in list:
1140 for f in list:
1138 if self.dirstate[f] != 'a':
1141 if self.dirstate[f] != 'a':
1139 self.ui.warn(_("%s not added!\n") % f)
1142 self.ui.warn(_("%s not added!\n") % f)
1140 else:
1143 else:
1141 self.dirstate.forget(f)
1144 self.dirstate.forget(f)
1142 finally:
1145 finally:
1143 wlock.release()
1146 wlock.release()
1144
1147
1145 def remove(self, list, unlink=False):
1148 def remove(self, list, unlink=False):
1146 if unlink:
1149 if unlink:
1147 for f in list:
1150 for f in list:
1148 try:
1151 try:
1149 util.unlink(self.wjoin(f))
1152 util.unlink(self.wjoin(f))
1150 except OSError, inst:
1153 except OSError, inst:
1151 if inst.errno != errno.ENOENT:
1154 if inst.errno != errno.ENOENT:
1152 raise
1155 raise
1153 wlock = self.wlock()
1156 wlock = self.wlock()
1154 try:
1157 try:
1155 for f in list:
1158 for f in list:
1156 if unlink and os.path.exists(self.wjoin(f)):
1159 if unlink and os.path.exists(self.wjoin(f)):
1157 self.ui.warn(_("%s still exists!\n") % f)
1160 self.ui.warn(_("%s still exists!\n") % f)
1158 elif self.dirstate[f] == 'a':
1161 elif self.dirstate[f] == 'a':
1159 self.dirstate.forget(f)
1162 self.dirstate.forget(f)
1160 elif f not in self.dirstate:
1163 elif f not in self.dirstate:
1161 self.ui.warn(_("%s not tracked!\n") % f)
1164 self.ui.warn(_("%s not tracked!\n") % f)
1162 else:
1165 else:
1163 self.dirstate.remove(f)
1166 self.dirstate.remove(f)
1164 finally:
1167 finally:
1165 wlock.release()
1168 wlock.release()
1166
1169
def undelete(self, list):
    """Restore files scheduled for removal from a dirstate parent.

    For each file marked 'r' in the dirstate, its contents are read
    back from whichever parent manifest contains it, written to the
    working directory, and the file is marked normal again.
    """
    manifests = [self.manifest.read(self.changelog.read(p)[0])
                 for p in self.dirstate.parents() if p != nullid]
    wlock = self.wlock()
    try:
        for f in list:
            if self.dirstate[f] != 'r':
                self.ui.warn(_("%s not removed!\n") % f)
            else:
                # the old "x in a and a or b" idiom would wrongly fall
                # through to b whenever a evaluated falsy; make the
                # parent-manifest selection explicit instead
                if f in manifests[0]:
                    m = manifests[0]
                else:
                    m = manifests[1]
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.normal(f)
    finally:
        wlock.release()
1182
1185
def copy(self, source, dest):
    """Record dest as a copy of source in the dirstate.

    dest must already exist in the working directory as a regular
    file or a symlink; otherwise a warning is issued and nothing is
    recorded.
    """
    p = self.wjoin(dest)
    # guard clauses: refuse missing or non-file destinations up front
    if not (os.path.exists(p) or os.path.islink(p)):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (os.path.isfile(p) or os.path.islink(p)):
        self.ui.warn(_("copy failed: %s is not a file or a "
                       "symbolic link\n") % dest)
        return
    wlock = self.wlock()
    try:
        if self.dirstate[dest] in '?r':
            # untracked or removed: make sure dest is tracked first
            self.dirstate.add(dest)
        self.dirstate.copy(source, dest)
    finally:
        wlock.release()
1198
1201
def heads(self, start=None):
    """Return the repository heads, ordered by descending revision."""
    nodes = self.changelog.heads(start)
    # decorate with the negated revision so a plain ascending sort
    # yields highest-revision-first
    decorated = sorted((-self.changelog.rev(n), n) for n in nodes)
    return [n for (negrev, n) in decorated]
1204
1207
def branchheads(self, branch=None, start=None, closed=False):
    """Return the heads of a named branch, highest revision first.

    branch defaults to the working directory branch.  When start is
    given, heads not reachable from start are dropped.  Unless closed
    is true, heads whose changeset closes the branch are filtered out.
    """
    if branch is None:
        branch = self[None].branch()
    branchnodes = self.branchmap()
    if branch not in branchnodes:
        return []
    bheads = branchnodes[branch]
    # the cache keeps heads ordered lowest to highest; callers want
    # the opposite
    bheads.reverse()
    if start is not None:
        # keep only heads reachable from startrev
        bheads = self.changelog.nodesbetween([start], bheads)[2]
    if not closed:
        bheads = [h for h in bheads
                  if 'close' not in self.changelog.read(h)[5]]
    return bheads
1221
1224
def branches(self, nodes):
    """For each node, walk back to the root of its linear segment.

    Returns a list of (tip, segment_root, parent1, parent2) tuples;
    the first-parent walk stops at a merge or at a root changeset.
    Defaults to the changelog tip when nodes is empty.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tipnode in nodes:
        n = tipnode
        while True:
            p1, p2 = self.changelog.parents(n)
            if p2 != nullid or p1 == nullid:
                # merge or root: this linear segment ends here
                result.append((tipnode, n, p1, p2))
                break
            n = p1
    return result
1235
1238
def between(self, pairs):
    """Sample nodes along the first-parent path of each (top, bottom).

    For every pair, walks first parents from top towards bottom and
    records the nodes at exponentially growing distances (1, 2, 4,
    ...).  Used by the discovery protocol's binary search.
    """
    result = []
    for top, bottom in pairs:
        sampled = []
        n = top
        dist, nextsample = 0, 1
        while n != bottom and n != nullid:
            if dist == nextsample:
                sampled.append(n)
                nextsample *= 2
            n = self.changelog.parents(n)[0]
            dist += 1
        result.append(sampled)
    return result
1254
1257
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    # thin wrapper: findcommonincoming returns (common, missing roots,
    # remote heads); only the missing roots are wanted here
    return self.findcommonincoming(remote, base, heads, force)[1]
1272
1275
def findcommonincoming(self, remote, base=None, heads=None, force=False):
    """Return a tuple (common, missing roots, heads) used to identify
    missing nodes from remote.

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    """
    m = self.changelog.nodemap
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    # split remote heads into locally-known (go straight into base)
    # and locally-unknown (to be investigated further)
    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    heads = unknown
    if not unknown:
        # every remote head is already known locally: nothing missing
        return base.keys(), [], []

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue locally-unknown parents for the next round of
                # remote.branches() requests
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # batch the wire requests in groups of 10 branches
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # narrowed down to adjacent nodes: p is the
                        # earliest unknown changeset on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch.add(p)
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in m:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    if base.keys() == [nullid]:
        # only the null revision in common: the repos share no history
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return base.keys(), list(fetch), heads
1414
1417
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        # no common-node info supplied: compute it ourselves (base is
        # filled in by findincoming as a side effect)
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = set(self.changelog.nodemap)

    # prune everything remote has from the tree
    remain.remove(nullid)
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            remain.remove(n)
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = set()
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads.add(p1)
            if p2 in heads:
                updated_heads.add(p2)

    # this is the set of all roots we have to push
    # NOTE: the return shape depends on whether heads was supplied
    if heads:
        return subset, list(updated_heads)
    else:
        return subset
1462
1465
def pull(self, remote, heads=None, force=False):
    """Fetch missing changesets from remote and add them locally.

    Returns 0 when the remote has nothing new, otherwise the result
    of addchangegroup.
    """
    lock = self.lock()
    try:
        common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                        force=force)
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0
        if fetch == [nullid]:
            # local repo is empty: everything will be transferred
            self.ui.status(_("requesting all changes\n"))

        subsetok = remote.capable('changegroupsubset')
        if heads is None and subsetok:
            heads = rheads

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif not subsetok:
            raise util.Abort(_("Partial pull cannot be done because "
                               "other repository doesn't support "
                               "changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        lock.release()
1489
1492
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote.

    Two transports exist:

    - addchangegroup assumes the local user can lock the remote repo
      (local filesystem, old ssh servers);
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    if remote.capable('unbundle'):
        return self.push_unbundle(remote, force, revs)
    else:
        return self.push_addchangegroup(remote, force, revs)
1502
1505
def prepush(self, remote, force, revs):
    """Compute the changegroup to push and enforce the new-head policy.

    Returns (changegroup, remote_heads) ready for the transport
    layer, or (None, status) when nothing is pushed: status 1 means
    "no changes found", status 0 means the push was refused because
    it would create new remote heads or branches without --force.
    """
    common = {}
    remote_heads = remote.heads()
    # findincoming fills `common` as a side effect; `inc` is truthy if
    # the remote has changes we do not
    inc = self.findincoming(remote, common, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, common, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    def checkbranch(lheads, rheads, updatelh):
        '''
        check whether there are more local heads than remote heads on
        a specific branch.

        lheads: local branch heads
        rheads: remote branch heads
        updatelh: outgoing local branch heads
        '''

        warn = 0

        if not revs and len(lheads) > len(rheads):
            warn = 1
        else:
            # local heads that descend from outgoing changesets: these
            # would become heads on the remote after the push
            updatelheads = [self.changelog.heads(x, lheads)
                            for x in updatelh]
            newheads = set(sum(updatelheads, [])) & set(lheads)

            if not newheads:
                return True

            for r in rheads:
                if r in self.changelog.nodemap:
                    # remote head known locally: it stays a head only
                    # if no outgoing head descends from it
                    desc = self.changelog.heads(r, heads)
                    l = [h for h in heads if h in desc]
                    if not l:
                        newheads.add(r)
                else:
                    # remote head unknown locally: still a head after push
                    newheads.add(r)
            if len(newheads) > len(rheads):
                warn = 1

        if warn:
            if not rheads: # new branch requires --force
                self.ui.warn(_("abort: push creates new"
                               " remote branch '%s'!\n") %
                             self[updatelh[0]].branch())
            else:
                self.ui.warn(_("abort: push creates new remote heads!\n"))

            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return False
        return True

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # Check for each named branch if we're creating new remote heads.
        # To be a remote head after push, node must be either:
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        #
        # New named branches cannot be created without --force.

        if remote_heads != [nullid]:
            if remote.capable('branchmap'):
                # per-branch check: build local branch->heads mapping
                localhds = {}
                if not revs:
                    localhds = self.branchmap()
                else:
                    for n in heads:
                        branch = self[n].branch()
                        if branch in localhds:
                            localhds[branch].append(n)
                        else:
                            localhds[branch] = [n]

                remotehds = remote.branchmap()

                for lh in localhds:
                    if lh in remotehds:
                        rheads = remotehds[lh]
                    else:
                        rheads = []
                    lheads = localhds[lh]
                    # outgoing roots on this branch only
                    updatelh = [upd for upd in update
                                if self[upd].branch() == lh]
                    if not updatelh:
                        continue
                    if not checkbranch(lheads, rheads, updatelh):
                        return None, 0
            else:
                # old server without branchmap: single global check
                if not checkbranch(heads, remote_heads, update):
                    return None, 0

    if inc:
        self.ui.warn(_("note: unsynced remote changes!\n"))


    if revs is None:
        # use the fast path, no race possible on push
        cg = self._changegroup(common.keys(), 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1614
1617
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote and adding the changegroup directly.

    Returns the remote's addchangegroup result, or prepush's status
    code when there is nothing to push.
    """
    lock = remote.lock()
    try:
        cg, extra = self.prepush(remote, force, revs)
        if cg is None:
            # prepush refused or found no changes; extra is the status
            return extra
        return remote.addchangegroup(cg, 'push', self.url())
    finally:
        lock.release()
1625
1628
1626 def push_unbundle(self, remote, force, revs):
1629 def push_unbundle(self, remote, force, revs):
1627 # local repo finds heads on server, finds out what revs it
1630 # local repo finds heads on server, finds out what revs it
1628 # must push. once revs transferred, if server finds it has
1631 # must push. once revs transferred, if server finds it has
1629 # different heads (someone else won commit/push race), server
1632 # different heads (someone else won commit/push race), server
1630 # aborts.
1633 # aborts.
1631
1634
1632 ret = self.prepush(remote, force, revs)
1635 ret = self.prepush(remote, force, revs)
1633 if ret[0] is not None:
1636 if ret[0] is not None:
1634 cg, remote_heads = ret
1637 cg, remote_heads = ret
1635 if force: remote_heads = ['force']
1638 if force: remote_heads = ['force']
1636 return remote.unbundle(cg, remote_heads, 'push')
1639 return remote.unbundle(cg, remote_heads, 'push')
1637 return ret[1]
1640 return ret[1]
1638
1641
1639 def changegroupinfo(self, nodes, source):
1642 def changegroupinfo(self, nodes, source):
1640 if self.ui.verbose or source == 'bundle':
1643 if self.ui.verbose or source == 'bundle':
1641 self.ui.status(_("%d changesets found\n") % len(nodes))
1644 self.ui.status(_("%d changesets found\n") % len(nodes))
1642 if self.ui.debugflag:
1645 if self.ui.debugflag:
1643 self.ui.debug(_("list of changesets:\n"))
1646 self.ui.debug(_("list of changesets:\n"))
1644 for node in nodes:
1647 for node in nodes:
1645 self.ui.debug("%s\n" % hex(node))
1648 self.ui.debug("%s\n" % hex(node))
1646
1649
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # Force the index to be loaded up front; the result is discarded.
        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                # Walk all ancestors of each known node (iteratively, via an
                # explicit work list) and mark them known as well.
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        #
        # Stream layout produced below: changelog chunks, then manifest
        # chunks, then one group per changed file (each preceded by its
        # filename), terminated by a close chunk.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # Integer keys (1) denote the manifest, which was
                        # already handled by add_extra_nodes(1, ...) above.
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1943
1946
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes between basenodes and the
        current heads of this repository.
        """
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
1947
1950
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        # Revision numbers of the changesets being sent; used to filter
        # manifest/file revisions via their linkrev below.
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset's "owning" changenode is itself.
        def identity(x):
            return x

        # Yield the nodes of 'log' whose linkrev points at one of the
        # changesets we are sending.
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # Callback factory: records the files touched by each outgoing
        # changeset (c[3] is the changed-files field of a changelog entry).
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        # Callback factory: map a node of 'revlog' to the changelog node
        # it was introduced by, via its linkrev.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        # Stream layout: changelog chunks, then manifest chunks, then one
        # group per changed file (each preceded by its filename),
        # terminated by a close chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
2015
2018
2016 def addchangegroup(self, source, srctype, url, emptyok=False):
2019 def addchangegroup(self, source, srctype, url, emptyok=False):
2017 """add changegroup to repo.
2020 """add changegroup to repo.
2018
2021
2019 return values:
2022 return values:
2020 - nothing changed or no source: 0
2023 - nothing changed or no source: 0
2021 - more heads than before: 1+added heads (2..n)
2024 - more heads than before: 1+added heads (2..n)
2022 - less heads than before: -1-removed heads (-2..-n)
2025 - less heads than before: -1-removed heads (-2..-n)
2023 - number of heads stays the same: 1
2026 - number of heads stays the same: 1
2024 """
2027 """
2025 def csmap(x):
2028 def csmap(x):
2026 self.ui.debug(_("add changeset %s\n") % short(x))
2029 self.ui.debug(_("add changeset %s\n") % short(x))
2027 return len(cl)
2030 return len(cl)
2028
2031
2029 def revmap(x):
2032 def revmap(x):
2030 return cl.rev(x)
2033 return cl.rev(x)
2031
2034
2032 if not source:
2035 if not source:
2033 return 0
2036 return 0
2034
2037
2035 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2038 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2036
2039
2037 changesets = files = revisions = 0
2040 changesets = files = revisions = 0
2038
2041
2039 # write changelog data to temp files so concurrent readers will not see
2042 # write changelog data to temp files so concurrent readers will not see
2040 # inconsistent view
2043 # inconsistent view
2041 cl = self.changelog
2044 cl = self.changelog
2042 cl.delayupdate()
2045 cl.delayupdate()
2043 oldheads = len(cl.heads())
2046 oldheads = len(cl.heads())
2044
2047
2045 tr = self.transaction()
2048 tr = self.transaction()
2046 try:
2049 try:
2047 trp = weakref.proxy(tr)
2050 trp = weakref.proxy(tr)
2048 # pull off the changeset group
2051 # pull off the changeset group
2049 self.ui.status(_("adding changesets\n"))
2052 self.ui.status(_("adding changesets\n"))
2050 clstart = len(cl)
2053 clstart = len(cl)
2051 chunkiter = changegroup.chunkiter(source)
2054 chunkiter = changegroup.chunkiter(source)
2052 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2055 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2053 raise util.Abort(_("received changelog group is empty"))
2056 raise util.Abort(_("received changelog group is empty"))
2054 clend = len(cl)
2057 clend = len(cl)
2055 changesets = clend - clstart
2058 changesets = clend - clstart
2056
2059
2057 # pull off the manifest group
2060 # pull off the manifest group
2058 self.ui.status(_("adding manifests\n"))
2061 self.ui.status(_("adding manifests\n"))
2059 chunkiter = changegroup.chunkiter(source)
2062 chunkiter = changegroup.chunkiter(source)
2060 # no need to check for empty manifest group here:
2063 # no need to check for empty manifest group here:
2061 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2064 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2062 # no new manifest will be created and the manifest group will
2065 # no new manifest will be created and the manifest group will
2063 # be empty during the pull
2066 # be empty during the pull
2064 self.manifest.addgroup(chunkiter, revmap, trp)
2067 self.manifest.addgroup(chunkiter, revmap, trp)
2065
2068
2066 # process the files
2069 # process the files
2067 self.ui.status(_("adding file changes\n"))
2070 self.ui.status(_("adding file changes\n"))
2068 while 1:
2071 while 1:
2069 f = changegroup.getchunk(source)
2072 f = changegroup.getchunk(source)
2070 if not f:
2073 if not f:
2071 break
2074 break
2072 self.ui.debug(_("adding %s revisions\n") % f)
2075 self.ui.debug(_("adding %s revisions\n") % f)
2073 fl = self.file(f)
2076 fl = self.file(f)
2074 o = len(fl)
2077 o = len(fl)
2075 chunkiter = changegroup.chunkiter(source)
2078 chunkiter = changegroup.chunkiter(source)
2076 if fl.addgroup(chunkiter, revmap, trp) is None:
2079 if fl.addgroup(chunkiter, revmap, trp) is None:
2077 raise util.Abort(_("received file revlog group is empty"))
2080 raise util.Abort(_("received file revlog group is empty"))
2078 revisions += len(fl) - o
2081 revisions += len(fl) - o
2079 files += 1
2082 files += 1
2080
2083
2081 newheads = len(cl.heads())
2084 newheads = len(cl.heads())
2082 heads = ""
2085 heads = ""
2083 if oldheads and newheads != oldheads:
2086 if oldheads and newheads != oldheads:
2084 heads = _(" (%+d heads)") % (newheads - oldheads)
2087 heads = _(" (%+d heads)") % (newheads - oldheads)
2085
2088
2086 self.ui.status(_("added %d changesets"
2089 self.ui.status(_("added %d changesets"
2087 " with %d changes to %d files%s\n")
2090 " with %d changes to %d files%s\n")
2088 % (changesets, revisions, files, heads))
2091 % (changesets, revisions, files, heads))
2089
2092
2090 if changesets > 0:
2093 if changesets > 0:
2091 p = lambda: cl.writepending() and self.root or ""
2094 p = lambda: cl.writepending() and self.root or ""
2092 self.hook('pretxnchangegroup', throw=True,
2095 self.hook('pretxnchangegroup', throw=True,
2093 node=hex(cl.node(clstart)), source=srctype,
2096 node=hex(cl.node(clstart)), source=srctype,
2094 url=url, pending=p)
2097 url=url, pending=p)
2095
2098
2096 # make changelog see real files again
2099 # make changelog see real files again
2097 cl.finalize(trp)
2100 cl.finalize(trp)
2098
2101
2099 tr.close()
2102 tr.close()
2100 finally:
2103 finally:
2101 del tr
2104 del tr
2102
2105
2103 if changesets > 0:
2106 if changesets > 0:
2104 # forcefully update the on-disk branch cache
2107 # forcefully update the on-disk branch cache
2105 self.ui.debug(_("updating the branch cache\n"))
2108 self.ui.debug(_("updating the branch cache\n"))
2106 self.branchtags()
2109 self.branchtags()
2107 self.hook("changegroup", node=hex(cl.node(clstart)),
2110 self.hook("changegroup", node=hex(cl.node(clstart)),
2108 source=srctype, url=url)
2111 source=srctype, url=url)
2109
2112
2110 for i in xrange(clstart, clend):
2113 for i in xrange(clstart, clend):
2111 self.hook("incoming", node=hex(cl.node(i)),
2114 self.hook("incoming", node=hex(cl.node(i)),
2112 source=srctype, url=url)
2115 source=srctype, url=url)
2113
2116
2114 # never return 0 here:
2117 # never return 0 here:
2115 if newheads < oldheads:
2118 if newheads < oldheads:
2116 return newheads - oldheads - 1
2119 return newheads - oldheads - 1
2117 else:
2120 else:
2118 return newheads - oldheads + 1
2121 return newheads - oldheads + 1
2119
2122
2120
2123
2121 def stream_in(self, remote):
2124 def stream_in(self, remote):
2122 fp = remote.stream_out()
2125 fp = remote.stream_out()
2123 l = fp.readline()
2126 l = fp.readline()
2124 try:
2127 try:
2125 resp = int(l)
2128 resp = int(l)
2126 except ValueError:
2129 except ValueError:
2127 raise error.ResponseError(
2130 raise error.ResponseError(
2128 _('Unexpected response from remote server:'), l)
2131 _('Unexpected response from remote server:'), l)
2129 if resp == 1:
2132 if resp == 1:
2130 raise util.Abort(_('operation forbidden by server'))
2133 raise util.Abort(_('operation forbidden by server'))
2131 elif resp == 2:
2134 elif resp == 2:
2132 raise util.Abort(_('locking the remote repository failed'))
2135 raise util.Abort(_('locking the remote repository failed'))
2133 elif resp != 0:
2136 elif resp != 0:
2134 raise util.Abort(_('the server sent an unknown error code'))
2137 raise util.Abort(_('the server sent an unknown error code'))
2135 self.ui.status(_('streaming all changes\n'))
2138 self.ui.status(_('streaming all changes\n'))
2136 l = fp.readline()
2139 l = fp.readline()
2137 try:
2140 try:
2138 total_files, total_bytes = map(int, l.split(' ', 1))
2141 total_files, total_bytes = map(int, l.split(' ', 1))
2139 except (ValueError, TypeError):
2142 except (ValueError, TypeError):
2140 raise error.ResponseError(
2143 raise error.ResponseError(
2141 _('Unexpected response from remote server:'), l)
2144 _('Unexpected response from remote server:'), l)
2142 self.ui.status(_('%d files to transfer, %s of data\n') %
2145 self.ui.status(_('%d files to transfer, %s of data\n') %
2143 (total_files, util.bytecount(total_bytes)))
2146 (total_files, util.bytecount(total_bytes)))
2144 start = time.time()
2147 start = time.time()
2145 for i in xrange(total_files):
2148 for i in xrange(total_files):
2146 # XXX doesn't support '\n' or '\r' in filenames
2149 # XXX doesn't support '\n' or '\r' in filenames
2147 l = fp.readline()
2150 l = fp.readline()
2148 try:
2151 try:
2149 name, size = l.split('\0', 1)
2152 name, size = l.split('\0', 1)
2150 size = int(size)
2153 size = int(size)
2151 except (ValueError, TypeError):
2154 except (ValueError, TypeError):
2152 raise error.ResponseError(
2155 raise error.ResponseError(
2153 _('Unexpected response from remote server:'), l)
2156 _('Unexpected response from remote server:'), l)
2154 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2157 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2155 # for backwards compat, name was partially encoded
2158 # for backwards compat, name was partially encoded
2156 ofp = self.sopener(store.decodedir(name), 'w')
2159 ofp = self.sopener(store.decodedir(name), 'w')
2157 for chunk in util.filechunkiter(fp, limit=size):
2160 for chunk in util.filechunkiter(fp, limit=size):
2158 ofp.write(chunk)
2161 ofp.write(chunk)
2159 ofp.close()
2162 ofp.close()
2160 elapsed = time.time() - start
2163 elapsed = time.time() - start
2161 if elapsed <= 0:
2164 if elapsed <= 0:
2162 elapsed = 0.001
2165 elapsed = 0.001
2163 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2166 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2164 (util.bytecount(total_bytes), elapsed,
2167 (util.bytecount(total_bytes), elapsed,
2165 util.bytecount(total_bytes / elapsed)))
2168 util.bytecount(total_bytes / elapsed)))
2166 self.invalidate()
2169 self.invalidate()
2167 return len(self.heads()) + 1
2170 return len(self.heads()) + 1
2168
2171
2169 def clone(self, remote, heads=[], stream=False):
2172 def clone(self, remote, heads=[], stream=False):
2170 '''clone remote repository.
2173 '''clone remote repository.
2171
2174
2172 keyword arguments:
2175 keyword arguments:
2173 heads: list of revs to clone (forces use of pull)
2176 heads: list of revs to clone (forces use of pull)
2174 stream: use streaming clone if possible'''
2177 stream: use streaming clone if possible'''
2175
2178
2176 # now, all clients that can request uncompressed clones can
2179 # now, all clients that can request uncompressed clones can
2177 # read repo formats supported by all servers that can serve
2180 # read repo formats supported by all servers that can serve
2178 # them.
2181 # them.
2179
2182
2180 # if revlog format changes, client will have to check version
2183 # if revlog format changes, client will have to check version
2181 # and format flags on "stream" capability, and use
2184 # and format flags on "stream" capability, and use
2182 # uncompressed only if compatible.
2185 # uncompressed only if compatible.
2183
2186
2184 if stream and not heads and remote.capable('stream'):
2187 if stream and not heads and remote.capable('stream'):
2185 return self.stream_in(remote)
2188 return self.stream_in(remote)
2186 return self.pull(remote, heads)
2189 return self.pull(remote, heads)
2187
2190
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    *files* is an iterable of (src, dest) pairs; it is copied eagerly so
    that the returned closure keeps no reference to the caller's objects.
    """
    pending = [(src, dest) for src, dest in files]
    def renamer():
        for src, dest in pending:
            util.rename(src, dest)
    return renamer
2195
2198
def instance(ui, path, create):
    """Open (or create, when *create* is true) the local repository at
    the 'file'-scheme path *path*."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2198
2201
def islocal(path):
    """Repositories handled by this module are always local."""
    # the path argument is accepted only for interface compatibility
    return True
General Comments 0
You need to be logged in to leave comments. Login now