##// END OF EJS Templates
util: use built-in set instead of util.unique
Martin Geisler -
r8151:12728188 default
parent child Browse files
Show More
@@ -1,335 +1,335 b''
1 # GNU Arch support for the convert extension
1 # GNU Arch support for the convert extension
2
2
3 from common import NoRepo, commandline, commit, converter_source
3 from common import NoRepo, commandline, commit, converter_source
4 from mercurial.i18n import _
4 from mercurial.i18n import _
5 from mercurial import util
5 from mercurial import util
6 import os, shutil, tempfile, stat, locale
6 import os, shutil, tempfile, stat, locale
7 from email.Parser import Parser
7 from email.Parser import Parser
8
8
9 class gnuarch_source(converter_source, commandline):
9 class gnuarch_source(converter_source, commandline):
10
10
11 class gnuarch_rev:
11 class gnuarch_rev:
12 def __init__(self, rev):
12 def __init__(self, rev):
13 self.rev = rev
13 self.rev = rev
14 self.summary = ''
14 self.summary = ''
15 self.date = None
15 self.date = None
16 self.author = ''
16 self.author = ''
17 self.continuationof = None
17 self.continuationof = None
18 self.add_files = []
18 self.add_files = []
19 self.mod_files = []
19 self.mod_files = []
20 self.del_files = []
20 self.del_files = []
21 self.ren_files = {}
21 self.ren_files = {}
22 self.ren_dirs = {}
22 self.ren_dirs = {}
23
23
24 def __init__(self, ui, path, rev=None):
24 def __init__(self, ui, path, rev=None):
25 super(gnuarch_source, self).__init__(ui, path, rev=rev)
25 super(gnuarch_source, self).__init__(ui, path, rev=rev)
26
26
27 if not os.path.exists(os.path.join(path, '{arch}')):
27 if not os.path.exists(os.path.join(path, '{arch}')):
28 raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
28 raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
29
29
30 # Could use checktool, but we want to check for baz or tla.
30 # Could use checktool, but we want to check for baz or tla.
31 self.execmd = None
31 self.execmd = None
32 if util.find_exe('baz'):
32 if util.find_exe('baz'):
33 self.execmd = 'baz'
33 self.execmd = 'baz'
34 else:
34 else:
35 if util.find_exe('tla'):
35 if util.find_exe('tla'):
36 self.execmd = 'tla'
36 self.execmd = 'tla'
37 else:
37 else:
38 raise util.Abort(_('cannot find a GNU Arch tool'))
38 raise util.Abort(_('cannot find a GNU Arch tool'))
39
39
40 commandline.__init__(self, ui, self.execmd)
40 commandline.__init__(self, ui, self.execmd)
41
41
42 self.path = os.path.realpath(path)
42 self.path = os.path.realpath(path)
43 self.tmppath = None
43 self.tmppath = None
44
44
45 self.treeversion = None
45 self.treeversion = None
46 self.lastrev = None
46 self.lastrev = None
47 self.changes = {}
47 self.changes = {}
48 self.parents = {}
48 self.parents = {}
49 self.tags = {}
49 self.tags = {}
50 self.modecache = {}
50 self.modecache = {}
51 self.catlogparser = Parser()
51 self.catlogparser = Parser()
52 self.locale = locale.getpreferredencoding()
52 self.locale = locale.getpreferredencoding()
53 self.archives = []
53 self.archives = []
54
54
55 def before(self):
55 def before(self):
56 # Get registered archives
56 # Get registered archives
57 self.archives = [i.rstrip('\n')
57 self.archives = [i.rstrip('\n')
58 for i in self.runlines0('archives', '-n')]
58 for i in self.runlines0('archives', '-n')]
59
59
60 if self.execmd == 'tla':
60 if self.execmd == 'tla':
61 output = self.run0('tree-version', self.path)
61 output = self.run0('tree-version', self.path)
62 else:
62 else:
63 output = self.run0('tree-version', '-d', self.path)
63 output = self.run0('tree-version', '-d', self.path)
64 self.treeversion = output.strip()
64 self.treeversion = output.strip()
65
65
66 # Get name of temporary directory
66 # Get name of temporary directory
67 version = self.treeversion.split('/')
67 version = self.treeversion.split('/')
68 self.tmppath = os.path.join(tempfile.gettempdir(),
68 self.tmppath = os.path.join(tempfile.gettempdir(),
69 'hg-%s' % version[1])
69 'hg-%s' % version[1])
70
70
71 # Generate parents dictionary
71 # Generate parents dictionary
72 self.parents[None] = []
72 self.parents[None] = []
73 treeversion = self.treeversion
73 treeversion = self.treeversion
74 child = None
74 child = None
75 while treeversion:
75 while treeversion:
76 self.ui.status(_('analyzing tree version %s...\n') % treeversion)
76 self.ui.status(_('analyzing tree version %s...\n') % treeversion)
77
77
78 archive = treeversion.split('/')[0]
78 archive = treeversion.split('/')[0]
79 if archive not in self.archives:
79 if archive not in self.archives:
80 self.ui.status(_('tree analysis stopped because it points to an unregistered archive %s...\n') % archive)
80 self.ui.status(_('tree analysis stopped because it points to an unregistered archive %s...\n') % archive)
81 break
81 break
82
82
83 # Get the complete list of revisions for that tree version
83 # Get the complete list of revisions for that tree version
84 output, status = self.runlines('revisions', '-r', '-f', treeversion)
84 output, status = self.runlines('revisions', '-r', '-f', treeversion)
85 self.checkexit(status, 'failed retrieveing revisions for %s' % treeversion)
85 self.checkexit(status, 'failed retrieveing revisions for %s' % treeversion)
86
86
87 # No new iteration unless a revision has a continuation-of header
87 # No new iteration unless a revision has a continuation-of header
88 treeversion = None
88 treeversion = None
89
89
90 for l in output:
90 for l in output:
91 rev = l.strip()
91 rev = l.strip()
92 self.changes[rev] = self.gnuarch_rev(rev)
92 self.changes[rev] = self.gnuarch_rev(rev)
93 self.parents[rev] = []
93 self.parents[rev] = []
94
94
95 # Read author, date and summary
95 # Read author, date and summary
96 catlog, status = self.run('cat-log', '-d', self.path, rev)
96 catlog, status = self.run('cat-log', '-d', self.path, rev)
97 if status:
97 if status:
98 catlog = self.run0('cat-archive-log', rev)
98 catlog = self.run0('cat-archive-log', rev)
99 self._parsecatlog(catlog, rev)
99 self._parsecatlog(catlog, rev)
100
100
101 # Populate the parents map
101 # Populate the parents map
102 self.parents[child].append(rev)
102 self.parents[child].append(rev)
103
103
104 # Keep track of the current revision as the child of the next
104 # Keep track of the current revision as the child of the next
105 # revision scanned
105 # revision scanned
106 child = rev
106 child = rev
107
107
108 # Check if we have to follow the usual incremental history
108 # Check if we have to follow the usual incremental history
109 # or if we have to 'jump' to a different treeversion given
109 # or if we have to 'jump' to a different treeversion given
110 # by the continuation-of header.
110 # by the continuation-of header.
111 if self.changes[rev].continuationof:
111 if self.changes[rev].continuationof:
112 treeversion = '--'.join(self.changes[rev].continuationof.split('--')[:-1])
112 treeversion = '--'.join(self.changes[rev].continuationof.split('--')[:-1])
113 break
113 break
114
114
115 # If we reached a base-0 revision w/o any continuation-of
115 # If we reached a base-0 revision w/o any continuation-of
116 # header, it means the tree history ends here.
116 # header, it means the tree history ends here.
117 if rev[-6:] == 'base-0':
117 if rev[-6:] == 'base-0':
118 break
118 break
119
119
120 def after(self):
120 def after(self):
121 self.ui.debug(_('cleaning up %s\n') % self.tmppath)
121 self.ui.debug(_('cleaning up %s\n') % self.tmppath)
122 shutil.rmtree(self.tmppath, ignore_errors=True)
122 shutil.rmtree(self.tmppath, ignore_errors=True)
123
123
124 def getheads(self):
124 def getheads(self):
125 return self.parents[None]
125 return self.parents[None]
126
126
127 def getfile(self, name, rev):
127 def getfile(self, name, rev):
128 if rev != self.lastrev:
128 if rev != self.lastrev:
129 raise util.Abort(_('internal calling inconsistency'))
129 raise util.Abort(_('internal calling inconsistency'))
130
130
131 # Raise IOError if necessary (i.e. deleted files).
131 # Raise IOError if necessary (i.e. deleted files).
132 if not os.path.exists(os.path.join(self.tmppath, name)):
132 if not os.path.exists(os.path.join(self.tmppath, name)):
133 raise IOError
133 raise IOError
134
134
135 data, mode = self._getfile(name, rev)
135 data, mode = self._getfile(name, rev)
136 self.modecache[(name, rev)] = mode
136 self.modecache[(name, rev)] = mode
137
137
138 return data
138 return data
139
139
140 def getmode(self, name, rev):
140 def getmode(self, name, rev):
141 return self.modecache[(name, rev)]
141 return self.modecache[(name, rev)]
142
142
143 def getchanges(self, rev):
143 def getchanges(self, rev):
144 self.modecache = {}
144 self.modecache = {}
145 self._update(rev)
145 self._update(rev)
146 changes = []
146 changes = []
147 copies = {}
147 copies = {}
148
148
149 for f in self.changes[rev].add_files:
149 for f in self.changes[rev].add_files:
150 changes.append((f, rev))
150 changes.append((f, rev))
151
151
152 for f in self.changes[rev].mod_files:
152 for f in self.changes[rev].mod_files:
153 changes.append((f, rev))
153 changes.append((f, rev))
154
154
155 for f in self.changes[rev].del_files:
155 for f in self.changes[rev].del_files:
156 changes.append((f, rev))
156 changes.append((f, rev))
157
157
158 for src in self.changes[rev].ren_files:
158 for src in self.changes[rev].ren_files:
159 to = self.changes[rev].ren_files[src]
159 to = self.changes[rev].ren_files[src]
160 changes.append((src, rev))
160 changes.append((src, rev))
161 changes.append((to, rev))
161 changes.append((to, rev))
162 copies[to] = src
162 copies[to] = src
163
163
164 for src in self.changes[rev].ren_dirs:
164 for src in self.changes[rev].ren_dirs:
165 to = self.changes[rev].ren_dirs[src]
165 to = self.changes[rev].ren_dirs[src]
166 chgs, cps = self._rendirchanges(src, to);
166 chgs, cps = self._rendirchanges(src, to);
167 changes += [(f, rev) for f in chgs]
167 changes += [(f, rev) for f in chgs]
168 copies.update(cps)
168 copies.update(cps)
169
169
170 self.lastrev = rev
170 self.lastrev = rev
171 return util.sort(util.unique(changes)), copies
171 return util.sort(set(changes)), copies
172
172
173 def getcommit(self, rev):
173 def getcommit(self, rev):
174 changes = self.changes[rev]
174 changes = self.changes[rev]
175 return commit(author = changes.author, date = changes.date,
175 return commit(author = changes.author, date = changes.date,
176 desc = changes.summary, parents = self.parents[rev], rev=rev)
176 desc = changes.summary, parents = self.parents[rev], rev=rev)
177
177
178 def gettags(self):
178 def gettags(self):
179 return self.tags
179 return self.tags
180
180
181 def _execute(self, cmd, *args, **kwargs):
181 def _execute(self, cmd, *args, **kwargs):
182 cmdline = [self.execmd, cmd]
182 cmdline = [self.execmd, cmd]
183 cmdline += args
183 cmdline += args
184 cmdline = [util.shellquote(arg) for arg in cmdline]
184 cmdline = [util.shellquote(arg) for arg in cmdline]
185 cmdline += ['>', util.nulldev, '2>', util.nulldev]
185 cmdline += ['>', util.nulldev, '2>', util.nulldev]
186 cmdline = util.quotecommand(' '.join(cmdline))
186 cmdline = util.quotecommand(' '.join(cmdline))
187 self.ui.debug(cmdline, '\n')
187 self.ui.debug(cmdline, '\n')
188 return os.system(cmdline)
188 return os.system(cmdline)
189
189
190 def _update(self, rev):
190 def _update(self, rev):
191 self.ui.debug(_('applying revision %s...\n') % rev)
191 self.ui.debug(_('applying revision %s...\n') % rev)
192 changeset, status = self.runlines('replay', '-d', self.tmppath,
192 changeset, status = self.runlines('replay', '-d', self.tmppath,
193 rev)
193 rev)
194 if status:
194 if status:
195 # Something went wrong while merging (baz or tla
195 # Something went wrong while merging (baz or tla
196 # issue?), get latest revision and try from there
196 # issue?), get latest revision and try from there
197 shutil.rmtree(self.tmppath, ignore_errors=True)
197 shutil.rmtree(self.tmppath, ignore_errors=True)
198 self._obtainrevision(rev)
198 self._obtainrevision(rev)
199 else:
199 else:
200 old_rev = self.parents[rev][0]
200 old_rev = self.parents[rev][0]
201 self.ui.debug(_('computing changeset between %s and %s...\n')
201 self.ui.debug(_('computing changeset between %s and %s...\n')
202 % (old_rev, rev))
202 % (old_rev, rev))
203 self._parsechangeset(changeset, rev)
203 self._parsechangeset(changeset, rev)
204
204
205 def _getfile(self, name, rev):
205 def _getfile(self, name, rev):
206 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
206 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
207 if stat.S_ISLNK(mode):
207 if stat.S_ISLNK(mode):
208 data = os.readlink(os.path.join(self.tmppath, name))
208 data = os.readlink(os.path.join(self.tmppath, name))
209 mode = mode and 'l' or ''
209 mode = mode and 'l' or ''
210 else:
210 else:
211 data = open(os.path.join(self.tmppath, name), 'rb').read()
211 data = open(os.path.join(self.tmppath, name), 'rb').read()
212 mode = (mode & 0111) and 'x' or ''
212 mode = (mode & 0111) and 'x' or ''
213 return data, mode
213 return data, mode
214
214
215 def _exclude(self, name):
215 def _exclude(self, name):
216 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
216 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
217 for exc in exclude:
217 for exc in exclude:
218 if name.find(exc) != -1:
218 if name.find(exc) != -1:
219 return True
219 return True
220 return False
220 return False
221
221
222 def _readcontents(self, path):
222 def _readcontents(self, path):
223 files = []
223 files = []
224 contents = os.listdir(path)
224 contents = os.listdir(path)
225 while len(contents) > 0:
225 while len(contents) > 0:
226 c = contents.pop()
226 c = contents.pop()
227 p = os.path.join(path, c)
227 p = os.path.join(path, c)
228 # os.walk could be used, but here we avoid internal GNU
228 # os.walk could be used, but here we avoid internal GNU
229 # Arch files and directories, thus saving a lot time.
229 # Arch files and directories, thus saving a lot time.
230 if not self._exclude(p):
230 if not self._exclude(p):
231 if os.path.isdir(p):
231 if os.path.isdir(p):
232 contents += [os.path.join(c, f) for f in os.listdir(p)]
232 contents += [os.path.join(c, f) for f in os.listdir(p)]
233 else:
233 else:
234 files.append(c)
234 files.append(c)
235 return files
235 return files
236
236
237 def _rendirchanges(self, src, dest):
237 def _rendirchanges(self, src, dest):
238 changes = []
238 changes = []
239 copies = {}
239 copies = {}
240 files = self._readcontents(os.path.join(self.tmppath, dest))
240 files = self._readcontents(os.path.join(self.tmppath, dest))
241 for f in files:
241 for f in files:
242 s = os.path.join(src, f)
242 s = os.path.join(src, f)
243 d = os.path.join(dest, f)
243 d = os.path.join(dest, f)
244 changes.append(s)
244 changes.append(s)
245 changes.append(d)
245 changes.append(d)
246 copies[d] = s
246 copies[d] = s
247 return changes, copies
247 return changes, copies
248
248
249 def _obtainrevision(self, rev):
249 def _obtainrevision(self, rev):
250 self.ui.debug(_('obtaining revision %s...\n') % rev)
250 self.ui.debug(_('obtaining revision %s...\n') % rev)
251 output = self._execute('get', rev, self.tmppath)
251 output = self._execute('get', rev, self.tmppath)
252 self.checkexit(output)
252 self.checkexit(output)
253 self.ui.debug(_('analysing revision %s...\n') % rev)
253 self.ui.debug(_('analysing revision %s...\n') % rev)
254 files = self._readcontents(self.tmppath)
254 files = self._readcontents(self.tmppath)
255 self.changes[rev].add_files += files
255 self.changes[rev].add_files += files
256
256
257 def _stripbasepath(self, path):
257 def _stripbasepath(self, path):
258 if path.startswith('./'):
258 if path.startswith('./'):
259 return path[2:]
259 return path[2:]
260 return path
260 return path
261
261
262 def _parsecatlog(self, data, rev):
262 def _parsecatlog(self, data, rev):
263 try:
263 try:
264 catlog = self.catlogparser.parsestr(data)
264 catlog = self.catlogparser.parsestr(data)
265
265
266 # Commit date
266 # Commit date
267 self.changes[rev].date = util.datestr(
267 self.changes[rev].date = util.datestr(
268 util.strdate(catlog['Standard-date'],
268 util.strdate(catlog['Standard-date'],
269 '%Y-%m-%d %H:%M:%S'))
269 '%Y-%m-%d %H:%M:%S'))
270
270
271 # Commit author
271 # Commit author
272 self.changes[rev].author = self.recode(catlog['Creator'])
272 self.changes[rev].author = self.recode(catlog['Creator'])
273
273
274 # Commit description
274 # Commit description
275 self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
275 self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
276 catlog.get_payload()))
276 catlog.get_payload()))
277 self.changes[rev].summary = self.recode(self.changes[rev].summary)
277 self.changes[rev].summary = self.recode(self.changes[rev].summary)
278
278
279 # Commit revision origin when dealing with a branch or tag
279 # Commit revision origin when dealing with a branch or tag
280 if catlog.has_key('Continuation-of'):
280 if catlog.has_key('Continuation-of'):
281 self.changes[rev].continuationof = self.recode(catlog['Continuation-of'])
281 self.changes[rev].continuationof = self.recode(catlog['Continuation-of'])
282 except Exception:
282 except Exception:
283 raise util.Abort(_('could not parse cat-log of %s') % rev)
283 raise util.Abort(_('could not parse cat-log of %s') % rev)
284
284
285 def _parsechangeset(self, data, rev):
285 def _parsechangeset(self, data, rev):
286 for l in data:
286 for l in data:
287 l = l.strip()
287 l = l.strip()
288 # Added file (ignore added directory)
288 # Added file (ignore added directory)
289 if l.startswith('A') and not l.startswith('A/'):
289 if l.startswith('A') and not l.startswith('A/'):
290 file = self._stripbasepath(l[1:].strip())
290 file = self._stripbasepath(l[1:].strip())
291 if not self._exclude(file):
291 if not self._exclude(file):
292 self.changes[rev].add_files.append(file)
292 self.changes[rev].add_files.append(file)
293 # Deleted file (ignore deleted directory)
293 # Deleted file (ignore deleted directory)
294 elif l.startswith('D') and not l.startswith('D/'):
294 elif l.startswith('D') and not l.startswith('D/'):
295 file = self._stripbasepath(l[1:].strip())
295 file = self._stripbasepath(l[1:].strip())
296 if not self._exclude(file):
296 if not self._exclude(file):
297 self.changes[rev].del_files.append(file)
297 self.changes[rev].del_files.append(file)
298 # Modified binary file
298 # Modified binary file
299 elif l.startswith('Mb'):
299 elif l.startswith('Mb'):
300 file = self._stripbasepath(l[2:].strip())
300 file = self._stripbasepath(l[2:].strip())
301 if not self._exclude(file):
301 if not self._exclude(file):
302 self.changes[rev].mod_files.append(file)
302 self.changes[rev].mod_files.append(file)
303 # Modified link
303 # Modified link
304 elif l.startswith('M->'):
304 elif l.startswith('M->'):
305 file = self._stripbasepath(l[3:].strip())
305 file = self._stripbasepath(l[3:].strip())
306 if not self._exclude(file):
306 if not self._exclude(file):
307 self.changes[rev].mod_files.append(file)
307 self.changes[rev].mod_files.append(file)
308 # Modified file
308 # Modified file
309 elif l.startswith('M'):
309 elif l.startswith('M'):
310 file = self._stripbasepath(l[1:].strip())
310 file = self._stripbasepath(l[1:].strip())
311 if not self._exclude(file):
311 if not self._exclude(file):
312 self.changes[rev].mod_files.append(file)
312 self.changes[rev].mod_files.append(file)
313 # Renamed file (or link)
313 # Renamed file (or link)
314 elif l.startswith('=>'):
314 elif l.startswith('=>'):
315 files = l[2:].strip().split(' ')
315 files = l[2:].strip().split(' ')
316 if len(files) == 1:
316 if len(files) == 1:
317 files = l[2:].strip().split('\t')
317 files = l[2:].strip().split('\t')
318 src = self._stripbasepath(files[0])
318 src = self._stripbasepath(files[0])
319 dst = self._stripbasepath(files[1])
319 dst = self._stripbasepath(files[1])
320 if not self._exclude(src) and not self._exclude(dst):
320 if not self._exclude(src) and not self._exclude(dst):
321 self.changes[rev].ren_files[src] = dst
321 self.changes[rev].ren_files[src] = dst
322 # Conversion from file to link or from link to file (modified)
322 # Conversion from file to link or from link to file (modified)
323 elif l.startswith('ch'):
323 elif l.startswith('ch'):
324 file = self._stripbasepath(l[2:].strip())
324 file = self._stripbasepath(l[2:].strip())
325 if not self._exclude(file):
325 if not self._exclude(file):
326 self.changes[rev].mod_files.append(file)
326 self.changes[rev].mod_files.append(file)
327 # Renamed directory
327 # Renamed directory
328 elif l.startswith('/>'):
328 elif l.startswith('/>'):
329 dirs = l[2:].strip().split(' ')
329 dirs = l[2:].strip().split(' ')
330 if len(dirs) == 1:
330 if len(dirs) == 1:
331 dirs = l[2:].strip().split('\t')
331 dirs = l[2:].strip().split('\t')
332 src = self._stripbasepath(dirs[0])
332 src = self._stripbasepath(dirs[0])
333 dst = self._stripbasepath(dirs[1])
333 dst = self._stripbasepath(dirs[1])
334 if not self._exclude(src) and not self._exclude(dst):
334 if not self._exclude(src) and not self._exclude(dst):
335 self.changes[rev].ren_dirs[src] = dst
335 self.changes[rev].ren_dirs[src] = dst
@@ -1,1205 +1,1205 b''
1 # Subversion 1.4/1.5 Python API backend
1 # Subversion 1.4/1.5 Python API backend
2 #
2 #
3 # Copyright(C) 2007 Daniel Holth et al
3 # Copyright(C) 2007 Daniel Holth et al
4 #
4 #
5 # Configuration options:
5 # Configuration options:
6 #
6 #
7 # convert.svn.trunk
7 # convert.svn.trunk
8 # Relative path to the trunk (default: "trunk")
8 # Relative path to the trunk (default: "trunk")
9 # convert.svn.branches
9 # convert.svn.branches
10 # Relative path to tree of branches (default: "branches")
10 # Relative path to tree of branches (default: "branches")
11 # convert.svn.tags
11 # convert.svn.tags
12 # Relative path to tree of tags (default: "tags")
12 # Relative path to tree of tags (default: "tags")
13 #
13 #
14 # Set these in a hgrc, or on the command line as follows:
14 # Set these in a hgrc, or on the command line as follows:
15 #
15 #
16 # hg convert --config convert.svn.trunk=wackoname [...]
16 # hg convert --config convert.svn.trunk=wackoname [...]
17
17
18 import locale
18 import locale
19 import os
19 import os
20 import re
20 import re
21 import sys
21 import sys
22 import cPickle as pickle
22 import cPickle as pickle
23 import tempfile
23 import tempfile
24 import urllib
24 import urllib
25
25
26 from mercurial import strutil, util
26 from mercurial import strutil, util
27 from mercurial.i18n import _
27 from mercurial.i18n import _
28
28
29 # Subversion stuff. Works best with very recent Python SVN bindings
29 # Subversion stuff. Works best with very recent Python SVN bindings
30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
31 # these bindings.
31 # these bindings.
32
32
33 from cStringIO import StringIO
33 from cStringIO import StringIO
34
34
35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
36 from common import commandline, converter_source, converter_sink, mapfile
36 from common import commandline, converter_source, converter_sink, mapfile
37
37
38 try:
38 try:
39 from svn.core import SubversionException, Pool
39 from svn.core import SubversionException, Pool
40 import svn
40 import svn
41 import svn.client
41 import svn.client
42 import svn.core
42 import svn.core
43 import svn.ra
43 import svn.ra
44 import svn.delta
44 import svn.delta
45 import transport
45 import transport
46 except ImportError:
46 except ImportError:
47 pass
47 pass
48
48
49 class SvnPathNotFound(Exception):
49 class SvnPathNotFound(Exception):
50 pass
50 pass
51
51
def geturl(path):
    """Resolve path to a Subversion URL, falling back to a file:// URL
    for local directories, or the path itself."""
    # Let the Subversion bindings resolve it first, if they can.
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except SubversionException:
        pass
    if os.path.isdir(path):
        path = os.path.normpath(os.path.abspath(path))
        if os.name == 'nt':
            path = '/' + util.normpath(path)
        return 'file://%s' % urllib.quote(path)
    return path
63
63
def optrev(number):
    """Wrap a plain revision number in an svn_opt_revision_t."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
69
69
class changedpath(object):
    """Picklable snapshot of an svn changed-path entry (copy info and
    action only)."""
    def __init__(self, p):
        self.copyfrom_path = p.copyfrom_path
        self.copyfrom_rev = p.copyfrom_rev
        self.action = p.action
75
75
76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
77 strict_node_history=False):
77 strict_node_history=False):
78 protocol = -1
78 protocol = -1
79 def receiver(orig_paths, revnum, author, date, message, pool):
79 def receiver(orig_paths, revnum, author, date, message, pool):
80 if orig_paths is not None:
80 if orig_paths is not None:
81 for k, v in orig_paths.iteritems():
81 for k, v in orig_paths.iteritems():
82 orig_paths[k] = changedpath(v)
82 orig_paths[k] = changedpath(v)
83 pickle.dump((orig_paths, revnum, author, date, message),
83 pickle.dump((orig_paths, revnum, author, date, message),
84 fp, protocol)
84 fp, protocol)
85
85
86 try:
86 try:
87 # Use an ra of our own so that our parent can consume
87 # Use an ra of our own so that our parent can consume
88 # our results without confusing the server.
88 # our results without confusing the server.
89 t = transport.SvnRaTransport(url=url)
89 t = transport.SvnRaTransport(url=url)
90 svn.ra.get_log(t.ra, paths, start, end, limit,
90 svn.ra.get_log(t.ra, paths, start, end, limit,
91 discover_changed_paths,
91 discover_changed_paths,
92 strict_node_history,
92 strict_node_history,
93 receiver)
93 receiver)
94 except SubversionException, (inst, num):
94 except SubversionException, (inst, num):
95 pickle.dump(num, fp, protocol)
95 pickle.dump(num, fp, protocol)
96 except IOError:
96 except IOError:
97 # Caller may interrupt the iteration
97 # Caller may interrupt the iteration
98 pickle.dump(None, fp, protocol)
98 pickle.dump(None, fp, protocol)
99 else:
99 else:
100 pickle.dump(None, fp, protocol)
100 pickle.dump(None, fp, protocol)
101 fp.close()
101 fp.close()
102 # With large history, cleanup process goes crazy and suddenly
102 # With large history, cleanup process goes crazy and suddenly
103 # consumes *huge* amount of memory. The output file being closed,
103 # consumes *huge* amount of memory. The output file being closed,
104 # there is no need for clean termination.
104 # there is no need for clean termination.
105 os._exit(0)
105 os._exit(0)
106
106
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # Binary mode matters on Windows: the stream carries pickled data.
    util.set_binary(sys.stdin)
    util.set_binary(sys.stdout)
    args = decodeargs(sys.stdin.read())
    get_log_child(sys.stdout, *args)
115
115
class logstream:
    """Interruptible revision log iterator.

    Reads pickled entries produced by get_log_child from a child's
    stdout until a terminating None (or an error marker) arrives.
    """
    def __init__(self, stdout):
        self._stdout = stdout

    def __iter__(self):
        while True:
            entry = pickle.load(self._stdout)
            try:
                orig_paths, revnum, author, date, message = entry
            except:
                # None means the child finished cleanly; anything else
                # non-unpackable is an error it reported.
                if entry is None:
                    break
                raise SubversionException("child raised exception", entry)
            yield entry

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
136
136
137
137
138 # Check to see if the given path is a local Subversion repo. Verify this by
138 # Check to see if the given path is a local Subversion repo. Verify this by
139 # looking for several svn-specific files and directories in the given
139 # looking for several svn-specific files and directories in the given
140 # directory.
140 # directory.
def filecheck(path, proto):
    """Return True if path looks like a local Subversion repository root
    (contains the svn-specific files and directories)."""
    required = ('locks', 'hooks', 'format', 'db')
    return all([os.path.exists(os.path.join(path, x)) for x in required])
146
146
147 # Check to see if a given path is the root of an svn repo over http. We verify
147 # Check to see if a given path is the root of an svn repo over http. We verify
148 # this by requesting a version-controlled URL we know can't exist and looking
148 # this by requesting a version-controlled URL we know can't exist and looking
149 # for the svn-specific "not found" XML.
149 # for the svn-specific "not found" XML.
def httpcheck(path, proto):
    """Return True if path is the root of an svn repo served over HTTP.

    Requests a version-controlled URL known not to exist and looks for
    the svn-specific "not found" XML marker in the response.
    """
    body = urllib.urlopen('%s://%s/!svn/ver/0/.svn' % (proto, path)).read()
    return '<m:human-readable errcode="160013">' in body
153
153
# Map URL schemes to the checker able to probe an svn repo behind them.
protomap = {
    'http': httpcheck,
    'https': httpcheck,
    'file': filecheck,
}
def issvnurl(url):
    """Return True if url points inside a Subversion repository.

    Walks the path upwards component by component, probing each prefix
    with the scheme-appropriate checker from protomap.
    """
    if '://' not in url:
        return False
    proto, path = url.split('://', 1)
    probe = protomap.get(proto, lambda p, p2: False)
    while '/' in path:
        if probe(path, proto):
            return True
        path = path.rsplit('/', 1)[0]
    return False
168
168
# SVN conversion code stolen from bzr-svn and tailor
#
# Subversion looks like a versioned filesystem, branches structures
# are defined by conventions and not enforced by the tool. First,
# we define the potential branches (modules) as "trunk" and "branches"
# children directories. Revisions are then identified by their
# module and revision number (and a repository identifier).
#
# The revision graph is really a tree (or a forest). By default, a
# revision parent is the previous revision in the same module. If the
# module directory is copied/moved from another module then the
# revision is the module root and its parent the source revision in
# the parent module. A revision has at most one parent.
#
183 class svn_source(converter_source):
183 class svn_source(converter_source):
    def __init__(self, ui, url, rev=None):
        """Probe url for a Subversion repository and open an RA session.

        Raises NoRepo when url does not look like a Subversion
        repository and MissingTool when usable Subversion python
        bindings (1.4 or later) are not available.
        """
        super(svn_source, self).__init__(ui, url, rev=rev)

        # Cheap syntactic checks first, then the network/FS probe.
        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
                (os.path.exists(url) and
                 os.path.exists(os.path.join(url, '.svn'))) or
                issvnurl(url)):
            raise NoRepo("%s does not look like a Subversion repo" % url)

        # SubversionException is only bound when the bindings imported
        # successfully at module load time.
        try:
            SubversionException
        except NameError:
            raise MissingTool(_('Subversion python bindings could not be loaded'))

        try:
            version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
            if version < (1, 4):
                raise MissingTool(_('Subversion python bindings %d.%d found, '
                                    '1.4 or later required') % version)
        except AttributeError:
            # Bindings too old to even expose version constants.
            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
                                'or later required'))

        self.encoding = locale.getpreferredencoding()
        self.lastrevs = {}  # module -> last converted revnum (see setrevmap)

        latest = None
        try:
            # Support file://path@rev syntax. Useful e.g. to convert
            # deleted branches.
            at = url.rfind('@')
            if at >= 0:
                latest = int(url[at+1:])
                url = url[:at]
        except ValueError:
            pass
        self.url = geturl(url)
        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
        try:
            self.transport = transport.SvnRaTransport(url=self.url)
            self.ra = self.transport.ra
            self.ctx = self.transport.client
            self.baseurl = svn.ra.get_repos_root(self.ra)
            # Module is either empty or a repository path starting with
            # a slash and not ending with a slash.
            self.module = urllib.unquote(self.url[len(self.baseurl):])
            self.prevmodule = None
            self.rootmodule = self.module
            self.commits = {}   # revision id -> commit object cache
            self.paths = {}     # revision id -> (paths, parents) cache
            self.uuid = svn.ra.get_uuid(self.ra).decode(self.encoding)
        except SubversionException:
            ui.print_exc()
            raise NoRepo("%s does not look like a Subversion repo" % self.url)

        if rev:
            try:
                latest = int(rev)
            except ValueError:
                raise util.Abort(_('svn: revision %s is not an integer') % rev)

        self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
        try:
            self.startrev = int(self.startrev)
            if self.startrev < 0:
                self.startrev = 0
        except ValueError:
            raise util.Abort(_('svn: start revision %s is not an integer')
                             % self.startrev)

        # A missing blacklist.txt is fine: blacklisting is optional.
        try:
            self.get_blacklist()
        except IOError:
            pass

        self.head = self.latest(self.module, latest)
        if not self.head:
            raise util.Abort(_('no revision found in module %s') %
                             self.module.encode(self.encoding))
        self.last_changed = self.revnum(self.head)

        self._changescache = None  # one-entry (rev, changes) cache

        # Remember the working copy root (if any) for converted().
        if os.path.exists(os.path.join(url, '.svn/entries')):
            self.wc = url
        else:
            self.wc = None
        self.convertfp = None
272
272
273 def setrevmap(self, revmap):
273 def setrevmap(self, revmap):
274 lastrevs = {}
274 lastrevs = {}
275 for revid in revmap.iterkeys():
275 for revid in revmap.iterkeys():
276 uuid, module, revnum = self.revsplit(revid)
276 uuid, module, revnum = self.revsplit(revid)
277 lastrevnum = lastrevs.setdefault(module, revnum)
277 lastrevnum = lastrevs.setdefault(module, revnum)
278 if revnum > lastrevnum:
278 if revnum > lastrevnum:
279 lastrevs[module] = revnum
279 lastrevs[module] = revnum
280 self.lastrevs = lastrevs
280 self.lastrevs = lastrevs
281
281
    def exists(self, path, optrev):
        """Return True if path exists in the repository at revision
        optrev, probing with an svn 'ls' on the repository URL."""
        try:
            svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
                          optrev, False, self.ctx)
            return True
        except SubversionException:
            # 'ls' failing is how a missing path manifests.
            return False
289
289
    def getheads(self):
        """Return the list of branch head revision ids to convert.

        The module head always comes first; extra heads are discovered by
        listing the configured branches directory. Raises util.Abort on
        layout or start-revision configuration errors.
        """

        def isdir(path, revnum):
            # True if path is a directory at revnum.
            kind = self._checkpath(path, revnum)
            return kind == svn.core.svn_node_dir

        def getcfgpath(name, rev):
            # Resolve the convert.svn.<name> layout path (trunk, tags or
            # branches). An explicitly empty setting disables the path;
            # a configured-but-missing path is an error; a missing
            # default is silently ignored.
            cfgpath = self.ui.config('convert', 'svn.' + name)
            if cfgpath is not None and cfgpath.strip() == '':
                return None
            path = (cfgpath or name).strip('/')
            if not self.exists(path, rev):
                if cfgpath:
                    raise util.Abort(_('expected %s to be at %r, but not found')
                                     % (name, path))
                return None
            self.ui.note(_('found %s at %r\n') % (name, path))
            return path

        rev = optrev(self.last_changed)
        oldmodule = ''
        trunk = getcfgpath('trunk', rev)
        self.tags = getcfgpath('tags', rev)
        branches = getcfgpath('branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or ''
            self.module += '/' + trunk
            self.head = self.latest(self.module, self.last_changed)
            if not self.head:
                raise util.Abort(_('no revision found in module %s') %
                                 self.module.encode(self.encoding))

        # First head in the list is the module's head
        self.heads = [self.head]
        if self.tags is not None:
            self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip('/')
            branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
                                        rev, False, self.ctx)
            for branch in branchnames.keys():
                module = '%s/%s/%s' % (oldmodule, branches, branch)
                if not isdir(module, self.last_changed):
                    # Skip stray files under the branches directory.
                    continue
                brevid = self.latest(module, self.last_changed)
                if not brevid:
                    self.ui.note(_('ignoring empty branch %s\n') %
                                 branch.encode(self.encoding))
                    continue
                self.ui.note(_('found branch %s at %d\n') %
                             (branch, self.revnum(brevid)))
                self.heads.append(brevid)

        # svn.startrev is only meaningful when converting one branch.
        if self.startrev and self.heads:
            if len(self.heads) > 1:
                raise util.Abort(_('svn: start revision is not supported '
                                   'with more than one branch'))
            revnum = self.revnum(self.heads[0])
            if revnum < self.startrev:
                raise util.Abort(_('svn: no revision found after start revision %d')
                                 % self.startrev)

        return self.heads
358
358
359 def getfile(self, file, rev):
359 def getfile(self, file, rev):
360 data, mode = self._getfile(file, rev)
360 data, mode = self._getfile(file, rev)
361 self.modecache[(file, rev)] = mode
361 self.modecache[(file, rev)] = mode
362 return data
362 return data
363
363
364 def getmode(self, file, rev):
364 def getmode(self, file, rev):
365 return self.modecache[(file, rev)]
365 return self.modecache[(file, rev)]
366
366
    def getchanges(self, rev):
        """Return (files, copies) for rev: files is a sorted list of
        (filename, rev) pairs, copies maps copy destination to source.

        Consumes the one-entry cache filled by getchangedfiles() and
        resets the per-revision mode cache used by getfile()/getmode().
        """
        if self._changescache and self._changescache[0] == rev:
            return self._changescache[1]
        self._changescache = None
        self.modecache = {}
        (paths, parents) = self.paths[rev]
        if parents:
            files, copies = self.expandpaths(rev, paths, parents)
        else:
            # Perform a full checkout on roots
            uuid, module, revnum = self.revsplit(rev)
            entries = svn.client.ls(self.baseurl + urllib.quote(module),
                                    optrev(revnum), True, self.ctx)
            files = [n for n,e in entries.iteritems()
                     if e.kind == svn.core.svn_node_file]
            copies = {}

        files.sort()
        files = zip(files, [rev] * len(files))

        # caller caches the result, so free it here to release memory
        del self.paths[rev]
        return (files, copies)
390
390
391 def getchangedfiles(self, rev, i):
391 def getchangedfiles(self, rev, i):
392 changes = self.getchanges(rev)
392 changes = self.getchanges(rev)
393 self._changescache = (rev, changes)
393 self._changescache = (rev, changes)
394 return [f[0] for f in changes[0]]
394 return [f[0] for f in changes[0]]
395
395
    def getcommit(self, rev):
        """Return (and drop from the cache) the commit object for rev,
        lazily fetching revision logs from the repository as needed."""
        if rev not in self.commits:
            uuid, module, revnum = self.revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            # revision graph backward traversal. Cache all of them
            # down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            # isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
        commit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return commit
415
415
    def gettags(self):
        """Return a mapping of tag name to source revision id, or an
        empty dict when no tags directory is configured."""
        tags = {}
        if self.tags is None:
            return tags

        # svn tags are just a convention, project branches left in a
        # 'tags' directory. There is no other relationship than
        # ancestry, which is expensive to discover and makes them hard
        # to update incrementally. Worse, past revisions may be
        # referenced by tags far away in the future, requiring a deep
        # history traversal on every calculation. Current code
        # performs a single backward traversal, tracking moves within
        # the tags directory (tag renaming) and recording a new tag
        # everytime a project is copied from outside the tags
        # directory. It also lists deleted tags, this behaviour may
        # change in the future.
        pendings = []
        tagspath = self.tags
        start = svn.ra.get_latest_revnum(self.ra)
        try:
            for entry in self._getlog([self.tags], start, self.startrev):
                origpaths, revnum, author, date, message = entry
                # Keep only the copy/move records from this log entry.
                copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
                          in origpaths.iteritems() if e.copyfrom_path]
                copies.sort()
                # Apply moves/copies from more specific to general
                copies.reverse()

                srctagspath = tagspath
                if copies and copies[-1][2] == tagspath:
                    # Track tags directory moves
                    srctagspath = copies.pop()[0]

                for source, sourcerev, dest in copies:
                    if not dest.startswith(tagspath + '/'):
                        continue
                    # A copy into an already-pending tag is a rename of
                    # that tag; rewrite its source in place.
                    for tag in pendings:
                        if tag[0].startswith(dest):
                            tagpath = source + tag[0][len(dest):]
                            tag[:2] = [tagpath, sourcerev]
                            break
                    else:
                        pendings.append([source, sourcerev, dest.split('/')[-1]])

                # Tell tag renamings from tag creations
                remainings = []
                for source, sourcerev, tagname in pendings:
                    if source.startswith(srctagspath):
                        # Still inside the tags directory: keep tracking.
                        remainings.append([source, sourcerev, tagname])
                        continue
                    # From revision may be fake, get one with changes
                    try:
                        tagid = self.latest(source, sourcerev)
                        if tagid:
                            tags[tagname] = tagid
                    except SvnPathNotFound:
                        # It happens when we are following directories we assumed
                        # were copied with their parents but were really created
                        # in the tag directory.
                        pass
                pendings = remainings
                tagspath = srctagspath

        except SubversionException:
            self.ui.note(_('no tags found at revision %d\n') % start)
        return tags
482
482
    def converted(self, rev, destrev):
        """Record that svn revision rev was converted as destrev.

        The mapping is appended to .svn/hg-shamap inside the source
        working copy; nothing happens when converting from a URL.
        """
        if not self.wc:
            return
        if self.convertfp is None:
            # Lazily opened in append mode so repeated conversions
            # accumulate in the same map file.
            self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
                                  'a')
        self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
        self.convertfp.flush()
491
491
    # -- helper functions --
493
493
    def revid(self, revnum, module=None):
        """Build the revision identifier 'svn:<uuid><module>@<revnum>'
        (the inverse of revsplit), defaulting to the current module."""
        if not module:
            module = self.module
        return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
                                 revnum)
499
499
500 def revnum(self, rev):
500 def revnum(self, rev):
501 return int(rev.split('@')[-1])
501 return int(rev.split('@')[-1])
502
502
    def revsplit(self, rev):
        """Split a revision id 'svn:<uuid><module>@<revnum>' into a
        (uuid, module, revnum) tuple, with revnum as an int."""
        url, revnum = strutil.rsplit(rev.encode(self.encoding), '@', 1)
        revnum = int(revnum)
        parts = url.split('/', 1)
        # Drop the 'svn:' scheme prefix from the uuid component.
        uuid = parts.pop(0)[4:]
        mod = ''
        if parts:
            mod = '/' + parts[0]
        return uuid, mod, revnum
512
512
    def latest(self, path, stop=0):
        """Find the latest revid affecting path, up to stop. It may return
        a revision in a different module, since a branch may be moved without
        a change being reported. Return None if computed module does not
        belong to rootmodule subtree.
        """
        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug(_('ignoring foreign branch %r\n') % path)
            return None

        if not stop:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat() must run against the repository root, not the
            # current module: temporarily reparent.
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))

        # stat() gives us the previous revision on this line of development, but
        # it might be in *another module*. Fetch the log and detect renames down
        # to the latest revision.
        stream = self._getlog([path], stop, dirent.created_rev)
        try:
            for entry in stream:
                paths, revnum, author, date, message = entry
                if revnum <= dirent.created_rev:
                    break

                # Follow the first copy record whose source prefix
                # matches path: rewrite path into the copy source.
                for p in paths:
                    if not path.startswith(p) or not paths[p].copyfrom_path:
                        continue
                    newpath = paths[p].copyfrom_path + path[len(p):]
                    self.ui.debug(_("branch renamed from %s to %s at %d\n") %
                                  (path, newpath, revnum))
                    path = newpath
                    break
        finally:
            stream.close()

        if not path.startswith(self.rootmodule):
            self.ui.debug(_('ignoring foreign branch %r\n') % path)
            return None
        return self.revid(dirent.created_rev, path)
560
560
561 def get_blacklist(self):
561 def get_blacklist(self):
562 """Avoid certain revision numbers.
562 """Avoid certain revision numbers.
563 It is not uncommon for two nearby revisions to cancel each other
563 It is not uncommon for two nearby revisions to cancel each other
564 out, e.g. 'I copied trunk into a subdirectory of itself instead
564 out, e.g. 'I copied trunk into a subdirectory of itself instead
565 of making a branch'. The converted repository is significantly
565 of making a branch'. The converted repository is significantly
566 smaller if we ignore such revisions."""
566 smaller if we ignore such revisions."""
567 self.blacklist = set()
567 self.blacklist = set()
568 blacklist = self.blacklist
568 blacklist = self.blacklist
569 for line in file("blacklist.txt", "r"):
569 for line in file("blacklist.txt", "r"):
570 if not line.startswith("#"):
570 if not line.startswith("#"):
571 try:
571 try:
572 svn_rev = int(line.strip())
572 svn_rev = int(line.strip())
573 blacklist.add(svn_rev)
573 blacklist.add(svn_rev)
574 except ValueError:
574 except ValueError:
575 pass # not an integer or a comment
575 pass # not an integer or a comment
576
576
577 def is_blacklisted(self, svn_rev):
577 def is_blacklisted(self, svn_rev):
578 return svn_rev in self.blacklist
578 return svn_rev in self.blacklist
579
579
    def reparent(self, module):
        """Reparent the svn transport and return the previous parent."""
        # Reparenting is a server round-trip; skip it when the session
        # is already rooted at the requested module.
        if self.prevmodule == module:
            return module
        svnurl = self.baseurl + urllib.quote(module)
        prevmodule = self.prevmodule
        if prevmodule is None:
            # Never reparented yet: report the repository root.
            prevmodule = ''
        self.ui.debug(_("reparent to %s\n") % svnurl)
        svn.ra.reparent(self.ra, svnurl)
        self.prevmodule = module
        return prevmodule
592
592
593 def expandpaths(self, rev, paths, parents):
593 def expandpaths(self, rev, paths, parents):
594 entries = []
594 entries = []
595 copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
595 copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
596 copies = {}
596 copies = {}
597
597
598 new_module, revnum = self.revsplit(rev)[1:]
598 new_module, revnum = self.revsplit(rev)[1:]
599 if new_module != self.module:
599 if new_module != self.module:
600 self.module = new_module
600 self.module = new_module
601 self.reparent(self.module)
601 self.reparent(self.module)
602
602
603 for path, ent in paths:
603 for path, ent in paths:
604 entrypath = self.getrelpath(path)
604 entrypath = self.getrelpath(path)
605 entry = entrypath.decode(self.encoding)
605 entry = entrypath.decode(self.encoding)
606
606
607 kind = self._checkpath(entrypath, revnum)
607 kind = self._checkpath(entrypath, revnum)
608 if kind == svn.core.svn_node_file:
608 if kind == svn.core.svn_node_file:
609 entries.append(self.recode(entry))
609 entries.append(self.recode(entry))
610 if not ent.copyfrom_path or not parents:
610 if not ent.copyfrom_path or not parents:
611 continue
611 continue
612 # Copy sources not in parent revisions cannot be represented,
612 # Copy sources not in parent revisions cannot be represented,
613 # ignore their origin for now
613 # ignore their origin for now
614 pmodule, prevnum = self.revsplit(parents[0])[1:]
614 pmodule, prevnum = self.revsplit(parents[0])[1:]
615 if ent.copyfrom_rev < prevnum:
615 if ent.copyfrom_rev < prevnum:
616 continue
616 continue
617 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
617 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
618 if not copyfrom_path:
618 if not copyfrom_path:
619 continue
619 continue
620 self.ui.debug(_("copied to %s from %s@%s\n") %
620 self.ui.debug(_("copied to %s from %s@%s\n") %
621 (entrypath, copyfrom_path, ent.copyfrom_rev))
621 (entrypath, copyfrom_path, ent.copyfrom_rev))
622 copies[self.recode(entry)] = self.recode(copyfrom_path)
622 copies[self.recode(entry)] = self.recode(copyfrom_path)
623 elif kind == 0: # gone, but had better be a deleted *file*
623 elif kind == 0: # gone, but had better be a deleted *file*
624 self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
624 self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
625
625
626 # if a branch is created but entries are removed in the same
626 # if a branch is created but entries are removed in the same
627 # changeset, get the right fromrev
627 # changeset, get the right fromrev
628 # parents cannot be empty here, you cannot remove things from
628 # parents cannot be empty here, you cannot remove things from
629 # a root revision.
629 # a root revision.
630 uuid, old_module, fromrev = self.revsplit(parents[0])
630 uuid, old_module, fromrev = self.revsplit(parents[0])
631
631
632 basepath = old_module + "/" + self.getrelpath(path)
632 basepath = old_module + "/" + self.getrelpath(path)
633 entrypath = basepath
633 entrypath = basepath
634
634
635 def lookup_parts(p):
635 def lookup_parts(p):
636 rc = None
636 rc = None
637 parts = p.split("/")
637 parts = p.split("/")
638 for i in range(len(parts)):
638 for i in range(len(parts)):
639 part = "/".join(parts[:i])
639 part = "/".join(parts[:i])
640 info = part, copyfrom.get(part, None)
640 info = part, copyfrom.get(part, None)
641 if info[1] is not None:
641 if info[1] is not None:
642 self.ui.debug(_("found parent directory %s\n") % info[1])
642 self.ui.debug(_("found parent directory %s\n") % info[1])
643 rc = info
643 rc = info
644 return rc
644 return rc
645
645
646 self.ui.debug(_("base, entry %s %s\n") % (basepath, entrypath))
646 self.ui.debug(_("base, entry %s %s\n") % (basepath, entrypath))
647
647
648 frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)
648 frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)
649
649
650 # need to remove fragment from lookup_parts and replace with copyfrom_path
650 # need to remove fragment from lookup_parts and replace with copyfrom_path
651 if frompath is not None:
651 if frompath is not None:
652 self.ui.debug(_("munge-o-matic\n"))
652 self.ui.debug(_("munge-o-matic\n"))
653 self.ui.debug(entrypath + '\n')
653 self.ui.debug(entrypath + '\n')
654 self.ui.debug(entrypath[len(frompath):] + '\n')
654 self.ui.debug(entrypath[len(frompath):] + '\n')
655 entrypath = froment.copyfrom_path + entrypath[len(frompath):]
655 entrypath = froment.copyfrom_path + entrypath[len(frompath):]
656 fromrev = froment.copyfrom_rev
656 fromrev = froment.copyfrom_rev
657 self.ui.debug(_("info: %s %s %s %s\n") % (frompath, froment, ent, entrypath))
657 self.ui.debug(_("info: %s %s %s %s\n") % (frompath, froment, ent, entrypath))
658
658
659 # We can avoid the reparent calls if the module has not changed
659 # We can avoid the reparent calls if the module has not changed
660 # but it probably does not worth the pain.
660 # but it probably does not worth the pain.
661 prevmodule = self.reparent('')
661 prevmodule = self.reparent('')
662 fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
662 fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
663 self.reparent(prevmodule)
663 self.reparent(prevmodule)
664
664
665 if fromkind == svn.core.svn_node_file: # a deleted file
665 if fromkind == svn.core.svn_node_file: # a deleted file
666 entries.append(self.recode(entry))
666 entries.append(self.recode(entry))
667 elif fromkind == svn.core.svn_node_dir:
667 elif fromkind == svn.core.svn_node_dir:
668 # print "Deleted/moved non-file:", revnum, path, ent
668 # print "Deleted/moved non-file:", revnum, path, ent
669 # children = self._find_children(path, revnum - 1)
669 # children = self._find_children(path, revnum - 1)
670 # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
670 # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
671 # Sometimes this is tricky. For example: in
671 # Sometimes this is tricky. For example: in
672 # The Subversion Repository revision 6940 a dir
672 # The Subversion Repository revision 6940 a dir
673 # was copied and one of its files was deleted
673 # was copied and one of its files was deleted
674 # from the new location in the same commit. This
674 # from the new location in the same commit. This
675 # code can't deal with that yet.
675 # code can't deal with that yet.
676 if ent.action == 'C':
676 if ent.action == 'C':
677 children = self._find_children(path, fromrev)
677 children = self._find_children(path, fromrev)
678 else:
678 else:
679 oroot = entrypath.strip('/')
679 oroot = entrypath.strip('/')
680 nroot = path.strip('/')
680 nroot = path.strip('/')
681 children = self._find_children(oroot, fromrev)
681 children = self._find_children(oroot, fromrev)
682 children = [s.replace(oroot,nroot) for s in children]
682 children = [s.replace(oroot,nroot) for s in children]
683 # Mark all [files, not directories] as deleted.
683 # Mark all [files, not directories] as deleted.
684 for child in children:
684 for child in children:
685 # Can we move a child directory and its
685 # Can we move a child directory and its
686 # parent in the same commit? (probably can). Could
686 # parent in the same commit? (probably can). Could
687 # cause problems if instead of revnum -1,
687 # cause problems if instead of revnum -1,
688 # we have to look in (copyfrom_path, revnum - 1)
688 # we have to look in (copyfrom_path, revnum - 1)
689 entrypath = self.getrelpath("/" + child, module=old_module)
689 entrypath = self.getrelpath("/" + child, module=old_module)
690 if entrypath:
690 if entrypath:
691 entry = self.recode(entrypath.decode(self.encoding))
691 entry = self.recode(entrypath.decode(self.encoding))
692 if entry in copies:
692 if entry in copies:
693 # deleted file within a copy
693 # deleted file within a copy
694 del copies[entry]
694 del copies[entry]
695 else:
695 else:
696 entries.append(entry)
696 entries.append(entry)
697 else:
697 else:
698 self.ui.debug(_('unknown path in revision %d: %s\n') % \
698 self.ui.debug(_('unknown path in revision %d: %s\n') % \
699 (revnum, path))
699 (revnum, path))
700 elif kind == svn.core.svn_node_dir:
700 elif kind == svn.core.svn_node_dir:
701 # Should probably synthesize normal file entries
701 # Should probably synthesize normal file entries
702 # and handle as above to clean up copy/rename handling.
702 # and handle as above to clean up copy/rename handling.
703
703
704 # If the directory just had a prop change,
704 # If the directory just had a prop change,
705 # then we shouldn't need to look for its children.
705 # then we shouldn't need to look for its children.
706 if ent.action == 'M':
706 if ent.action == 'M':
707 continue
707 continue
708
708
709 # Also this could create duplicate entries. Not sure
709 # Also this could create duplicate entries. Not sure
710 # whether this will matter. Maybe should make entries a set.
710 # whether this will matter. Maybe should make entries a set.
711 # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
711 # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
712 # This will fail if a directory was copied
712 # This will fail if a directory was copied
713 # from another branch and then some of its files
713 # from another branch and then some of its files
714 # were deleted in the same transaction.
714 # were deleted in the same transaction.
715 children = util.sort(self._find_children(path, revnum))
715 children = util.sort(self._find_children(path, revnum))
716 for child in children:
716 for child in children:
717 # Can we move a child directory and its
717 # Can we move a child directory and its
718 # parent in the same commit? (probably can). Could
718 # parent in the same commit? (probably can). Could
719 # cause problems if instead of revnum -1,
719 # cause problems if instead of revnum -1,
720 # we have to look in (copyfrom_path, revnum - 1)
720 # we have to look in (copyfrom_path, revnum - 1)
721 entrypath = self.getrelpath("/" + child)
721 entrypath = self.getrelpath("/" + child)
722 # print child, self.module, entrypath
722 # print child, self.module, entrypath
723 if entrypath:
723 if entrypath:
724 # Need to filter out directories here...
724 # Need to filter out directories here...
725 kind = self._checkpath(entrypath, revnum)
725 kind = self._checkpath(entrypath, revnum)
726 if kind != svn.core.svn_node_dir:
726 if kind != svn.core.svn_node_dir:
727 entries.append(self.recode(entrypath))
727 entries.append(self.recode(entrypath))
728
728
729 # Copies here (must copy all from source)
729 # Copies here (must copy all from source)
730 # Probably not a real problem for us if
730 # Probably not a real problem for us if
731 # source does not exist
731 # source does not exist
732 if not ent.copyfrom_path or not parents:
732 if not ent.copyfrom_path or not parents:
733 continue
733 continue
734 # Copy sources not in parent revisions cannot be represented,
734 # Copy sources not in parent revisions cannot be represented,
735 # ignore their origin for now
735 # ignore their origin for now
736 pmodule, prevnum = self.revsplit(parents[0])[1:]
736 pmodule, prevnum = self.revsplit(parents[0])[1:]
737 if ent.copyfrom_rev < prevnum:
737 if ent.copyfrom_rev < prevnum:
738 continue
738 continue
739 copyfrompath = ent.copyfrom_path.decode(self.encoding)
739 copyfrompath = ent.copyfrom_path.decode(self.encoding)
740 copyfrompath = self.getrelpath(copyfrompath, pmodule)
740 copyfrompath = self.getrelpath(copyfrompath, pmodule)
741 if not copyfrompath:
741 if not copyfrompath:
742 continue
742 continue
743 copyfrom[path] = ent
743 copyfrom[path] = ent
744 self.ui.debug(_("mark %s came from %s:%d\n")
744 self.ui.debug(_("mark %s came from %s:%d\n")
745 % (path, copyfrompath, ent.copyfrom_rev))
745 % (path, copyfrompath, ent.copyfrom_rev))
746 children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
746 children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
747 children.sort()
747 children.sort()
748 for child in children:
748 for child in children:
749 entrypath = self.getrelpath("/" + child, pmodule)
749 entrypath = self.getrelpath("/" + child, pmodule)
750 if not entrypath:
750 if not entrypath:
751 continue
751 continue
752 entry = entrypath.decode(self.encoding)
752 entry = entrypath.decode(self.encoding)
753 copytopath = path + entry[len(copyfrompath):]
753 copytopath = path + entry[len(copyfrompath):]
754 copytopath = self.getrelpath(copytopath)
754 copytopath = self.getrelpath(copytopath)
755 copies[self.recode(copytopath)] = self.recode(entry, pmodule)
755 copies[self.recode(copytopath)] = self.recode(entry, pmodule)
756
756
757 return (util.unique(entries), copies)
757 return (list(set(entries)), copies)
758
758
759 def _fetch_revisions(self, from_revnum, to_revnum):
759 def _fetch_revisions(self, from_revnum, to_revnum):
760 if from_revnum < to_revnum:
760 if from_revnum < to_revnum:
761 from_revnum, to_revnum = to_revnum, from_revnum
761 from_revnum, to_revnum = to_revnum, from_revnum
762
762
763 self.child_cset = None
763 self.child_cset = None
764
764
765 def parselogentry(orig_paths, revnum, author, date, message):
765 def parselogentry(orig_paths, revnum, author, date, message):
766 """Return the parsed commit object or None, and True if
766 """Return the parsed commit object or None, and True if
767 the revision is a branch root.
767 the revision is a branch root.
768 """
768 """
769 self.ui.debug(_("parsing revision %d (%d changes)\n") %
769 self.ui.debug(_("parsing revision %d (%d changes)\n") %
770 (revnum, len(orig_paths)))
770 (revnum, len(orig_paths)))
771
771
772 branched = False
772 branched = False
773 rev = self.revid(revnum)
773 rev = self.revid(revnum)
774 # branch log might return entries for a parent we already have
774 # branch log might return entries for a parent we already have
775
775
776 if rev in self.commits or revnum < to_revnum:
776 if rev in self.commits or revnum < to_revnum:
777 return None, branched
777 return None, branched
778
778
779 parents = []
779 parents = []
780 # check whether this revision is the start of a branch or part
780 # check whether this revision is the start of a branch or part
781 # of a branch renaming
781 # of a branch renaming
782 orig_paths = util.sort(orig_paths.items())
782 orig_paths = util.sort(orig_paths.items())
783 root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
783 root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
784 if root_paths:
784 if root_paths:
785 path, ent = root_paths[-1]
785 path, ent = root_paths[-1]
786 if ent.copyfrom_path:
786 if ent.copyfrom_path:
787 branched = True
787 branched = True
788 newpath = ent.copyfrom_path + self.module[len(path):]
788 newpath = ent.copyfrom_path + self.module[len(path):]
789 # ent.copyfrom_rev may not be the actual last revision
789 # ent.copyfrom_rev may not be the actual last revision
790 previd = self.latest(newpath, ent.copyfrom_rev)
790 previd = self.latest(newpath, ent.copyfrom_rev)
791 if previd is not None:
791 if previd is not None:
792 prevmodule, prevnum = self.revsplit(previd)[1:]
792 prevmodule, prevnum = self.revsplit(previd)[1:]
793 if prevnum >= self.startrev:
793 if prevnum >= self.startrev:
794 parents = [previd]
794 parents = [previd]
795 self.ui.note(_('found parent of branch %s at %d: %s\n') %
795 self.ui.note(_('found parent of branch %s at %d: %s\n') %
796 (self.module, prevnum, prevmodule))
796 (self.module, prevnum, prevmodule))
797 else:
797 else:
798 self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
798 self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
799
799
800 paths = []
800 paths = []
801 # filter out unrelated paths
801 # filter out unrelated paths
802 for path, ent in orig_paths:
802 for path, ent in orig_paths:
803 if self.getrelpath(path) is None:
803 if self.getrelpath(path) is None:
804 continue
804 continue
805 paths.append((path, ent))
805 paths.append((path, ent))
806
806
807 # Example SVN datetime. Includes microseconds.
807 # Example SVN datetime. Includes microseconds.
808 # ISO-8601 conformant
808 # ISO-8601 conformant
809 # '2007-01-04T17:35:00.902377Z'
809 # '2007-01-04T17:35:00.902377Z'
810 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
810 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
811
811
812 log = message and self.recode(message) or ''
812 log = message and self.recode(message) or ''
813 author = author and self.recode(author) or ''
813 author = author and self.recode(author) or ''
814 try:
814 try:
815 branch = self.module.split("/")[-1]
815 branch = self.module.split("/")[-1]
816 if branch == 'trunk':
816 if branch == 'trunk':
817 branch = ''
817 branch = ''
818 except IndexError:
818 except IndexError:
819 branch = None
819 branch = None
820
820
821 cset = commit(author=author,
821 cset = commit(author=author,
822 date=util.datestr(date),
822 date=util.datestr(date),
823 desc=log,
823 desc=log,
824 parents=parents,
824 parents=parents,
825 branch=branch,
825 branch=branch,
826 rev=rev.encode('utf-8'))
826 rev=rev.encode('utf-8'))
827
827
828 self.commits[rev] = cset
828 self.commits[rev] = cset
829 # The parents list is *shared* among self.paths and the
829 # The parents list is *shared* among self.paths and the
830 # commit object. Both will be updated below.
830 # commit object. Both will be updated below.
831 self.paths[rev] = (paths, cset.parents)
831 self.paths[rev] = (paths, cset.parents)
832 if self.child_cset and not self.child_cset.parents:
832 if self.child_cset and not self.child_cset.parents:
833 self.child_cset.parents[:] = [rev]
833 self.child_cset.parents[:] = [rev]
834 self.child_cset = cset
834 self.child_cset = cset
835 return cset, branched
835 return cset, branched
836
836
837 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
837 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
838 (self.module, from_revnum, to_revnum))
838 (self.module, from_revnum, to_revnum))
839
839
840 try:
840 try:
841 firstcset = None
841 firstcset = None
842 lastonbranch = False
842 lastonbranch = False
843 stream = self._getlog([self.module], from_revnum, to_revnum)
843 stream = self._getlog([self.module], from_revnum, to_revnum)
844 try:
844 try:
845 for entry in stream:
845 for entry in stream:
846 paths, revnum, author, date, message = entry
846 paths, revnum, author, date, message = entry
847 if revnum < self.startrev:
847 if revnum < self.startrev:
848 lastonbranch = True
848 lastonbranch = True
849 break
849 break
850 if self.is_blacklisted(revnum):
850 if self.is_blacklisted(revnum):
851 self.ui.note(_('skipping blacklisted revision %d\n')
851 self.ui.note(_('skipping blacklisted revision %d\n')
852 % revnum)
852 % revnum)
853 continue
853 continue
854 if paths is None:
854 if paths is None:
855 self.ui.debug(_('revision %d has no entries\n') % revnum)
855 self.ui.debug(_('revision %d has no entries\n') % revnum)
856 continue
856 continue
857 cset, lastonbranch = parselogentry(paths, revnum, author,
857 cset, lastonbranch = parselogentry(paths, revnum, author,
858 date, message)
858 date, message)
859 if cset:
859 if cset:
860 firstcset = cset
860 firstcset = cset
861 if lastonbranch:
861 if lastonbranch:
862 break
862 break
863 finally:
863 finally:
864 stream.close()
864 stream.close()
865
865
866 if not lastonbranch and firstcset and not firstcset.parents:
866 if not lastonbranch and firstcset and not firstcset.parents:
867 # The first revision of the sequence (the last fetched one)
867 # The first revision of the sequence (the last fetched one)
868 # has invalid parents if not a branch root. Find the parent
868 # has invalid parents if not a branch root. Find the parent
869 # revision now, if any.
869 # revision now, if any.
870 try:
870 try:
871 firstrevnum = self.revnum(firstcset.rev)
871 firstrevnum = self.revnum(firstcset.rev)
872 if firstrevnum > 1:
872 if firstrevnum > 1:
873 latest = self.latest(self.module, firstrevnum - 1)
873 latest = self.latest(self.module, firstrevnum - 1)
874 if latest:
874 if latest:
875 firstcset.parents.append(latest)
875 firstcset.parents.append(latest)
876 except SvnPathNotFound:
876 except SvnPathNotFound:
877 pass
877 pass
878 except SubversionException, (inst, num):
878 except SubversionException, (inst, num):
879 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
879 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
880 raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
880 raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
881 raise
881 raise
882
882
883 def _getfile(self, file, rev):
883 def _getfile(self, file, rev):
884 # TODO: ra.get_file transmits the whole file instead of diffs.
884 # TODO: ra.get_file transmits the whole file instead of diffs.
885 mode = ''
885 mode = ''
886 try:
886 try:
887 new_module, revnum = self.revsplit(rev)[1:]
887 new_module, revnum = self.revsplit(rev)[1:]
888 if self.module != new_module:
888 if self.module != new_module:
889 self.module = new_module
889 self.module = new_module
890 self.reparent(self.module)
890 self.reparent(self.module)
891 io = StringIO()
891 io = StringIO()
892 info = svn.ra.get_file(self.ra, file, revnum, io)
892 info = svn.ra.get_file(self.ra, file, revnum, io)
893 data = io.getvalue()
893 data = io.getvalue()
894 # ra.get_files() seems to keep a reference on the input buffer
894 # ra.get_files() seems to keep a reference on the input buffer
895 # preventing collection. Release it explicitely.
895 # preventing collection. Release it explicitely.
896 io.close()
896 io.close()
897 if isinstance(info, list):
897 if isinstance(info, list):
898 info = info[-1]
898 info = info[-1]
899 mode = ("svn:executable" in info) and 'x' or ''
899 mode = ("svn:executable" in info) and 'x' or ''
900 mode = ("svn:special" in info) and 'l' or mode
900 mode = ("svn:special" in info) and 'l' or mode
901 except SubversionException, e:
901 except SubversionException, e:
902 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
902 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
903 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
903 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
904 if e.apr_err in notfound: # File not found
904 if e.apr_err in notfound: # File not found
905 raise IOError()
905 raise IOError()
906 raise
906 raise
907 if mode == 'l':
907 if mode == 'l':
908 link_prefix = "link "
908 link_prefix = "link "
909 if data.startswith(link_prefix):
909 if data.startswith(link_prefix):
910 data = data[len(link_prefix):]
910 data = data[len(link_prefix):]
911 return data, mode
911 return data, mode
912
912
913 def _find_children(self, path, revnum):
913 def _find_children(self, path, revnum):
914 path = path.strip('/')
914 path = path.strip('/')
915 pool = Pool()
915 pool = Pool()
916 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
916 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
917 return ['%s/%s' % (path, x) for x in
917 return ['%s/%s' % (path, x) for x in
918 svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
918 svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
919
919
920 def getrelpath(self, path, module=None):
920 def getrelpath(self, path, module=None):
921 if module is None:
921 if module is None:
922 module = self.module
922 module = self.module
923 # Given the repository url of this wc, say
923 # Given the repository url of this wc, say
924 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
924 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
925 # extract the "entry" portion (a relative path) from what
925 # extract the "entry" portion (a relative path) from what
926 # svn log --xml says, ie
926 # svn log --xml says, ie
927 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
927 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
928 # that is to say "tests/PloneTestCase.py"
928 # that is to say "tests/PloneTestCase.py"
929 if path.startswith(module):
929 if path.startswith(module):
930 relative = path.rstrip('/')[len(module):]
930 relative = path.rstrip('/')[len(module):]
931 if relative.startswith('/'):
931 if relative.startswith('/'):
932 return relative[1:]
932 return relative[1:]
933 elif relative == '':
933 elif relative == '':
934 return relative
934 return relative
935
935
936 # The path is outside our tracked tree...
936 # The path is outside our tracked tree...
937 self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
937 self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
938 return None
938 return None
939
939
940 def _checkpath(self, path, revnum):
940 def _checkpath(self, path, revnum):
941 # ra.check_path does not like leading slashes very much, it leads
941 # ra.check_path does not like leading slashes very much, it leads
942 # to PROPFIND subversion errors
942 # to PROPFIND subversion errors
943 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
943 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
944
944
945 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
945 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
946 strict_node_history=False):
946 strict_node_history=False):
947 # Normalize path names, svn >= 1.5 only wants paths relative to
947 # Normalize path names, svn >= 1.5 only wants paths relative to
948 # supplied URL
948 # supplied URL
949 relpaths = []
949 relpaths = []
950 for p in paths:
950 for p in paths:
951 if not p.startswith('/'):
951 if not p.startswith('/'):
952 p = self.module + '/' + p
952 p = self.module + '/' + p
953 relpaths.append(p.strip('/'))
953 relpaths.append(p.strip('/'))
954 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
954 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
955 strict_node_history]
955 strict_node_history]
956 arg = encodeargs(args)
956 arg = encodeargs(args)
957 hgexe = util.hgexecutable()
957 hgexe = util.hgexecutable()
958 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
958 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
959 stdin, stdout = util.popen2(cmd, 'b')
959 stdin, stdout = util.popen2(cmd, 'b')
960 stdin.write(arg)
960 stdin.write(arg)
961 stdin.close()
961 stdin.close()
962 return logstream(stdout)
962 return logstream(stdout)
963
963
# Hook installed into repositories created by svn_sink: permits only the
# revision-property changes the converter performs (editing svn:log,
# adding hg:convert-branch / hg:convert-rev) and rejects everything else.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
979
979
980 class svn_sink(converter_sink, commandline):
980 class svn_sink(converter_sink, commandline):
981 commit_re = re.compile(r'Committed revision (\d+).', re.M)
981 commit_re = re.compile(r'Committed revision (\d+).', re.M)
982
982
983 def prerun(self):
983 def prerun(self):
984 if self.wc:
984 if self.wc:
985 os.chdir(self.wc)
985 os.chdir(self.wc)
986
986
987 def postrun(self):
987 def postrun(self):
988 if self.wc:
988 if self.wc:
989 os.chdir(self.cwd)
989 os.chdir(self.cwd)
990
990
991 def join(self, name):
991 def join(self, name):
992 return os.path.join(self.wc, '.svn', name)
992 return os.path.join(self.wc, '.svn', name)
993
993
994 def revmapfile(self):
994 def revmapfile(self):
995 return self.join('hg-shamap')
995 return self.join('hg-shamap')
996
996
997 def authorfile(self):
997 def authorfile(self):
998 return self.join('hg-authormap')
998 return self.join('hg-authormap')
999
999
1000 def __init__(self, ui, path):
1000 def __init__(self, ui, path):
1001 converter_sink.__init__(self, ui, path)
1001 converter_sink.__init__(self, ui, path)
1002 commandline.__init__(self, ui, 'svn')
1002 commandline.__init__(self, ui, 'svn')
1003 self.delete = []
1003 self.delete = []
1004 self.setexec = []
1004 self.setexec = []
1005 self.delexec = []
1005 self.delexec = []
1006 self.copies = []
1006 self.copies = []
1007 self.wc = None
1007 self.wc = None
1008 self.cwd = os.getcwd()
1008 self.cwd = os.getcwd()
1009
1009
1010 path = os.path.realpath(path)
1010 path = os.path.realpath(path)
1011
1011
1012 created = False
1012 created = False
1013 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1013 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1014 self.wc = path
1014 self.wc = path
1015 self.run0('update')
1015 self.run0('update')
1016 else:
1016 else:
1017 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1017 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1018
1018
1019 if os.path.isdir(os.path.dirname(path)):
1019 if os.path.isdir(os.path.dirname(path)):
1020 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1020 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1021 ui.status(_('initializing svn repo %r\n') %
1021 ui.status(_('initializing svn repo %r\n') %
1022 os.path.basename(path))
1022 os.path.basename(path))
1023 commandline(ui, 'svnadmin').run0('create', path)
1023 commandline(ui, 'svnadmin').run0('create', path)
1024 created = path
1024 created = path
1025 path = util.normpath(path)
1025 path = util.normpath(path)
1026 if not path.startswith('/'):
1026 if not path.startswith('/'):
1027 path = '/' + path
1027 path = '/' + path
1028 path = 'file://' + path
1028 path = 'file://' + path
1029
1029
1030 ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
1030 ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
1031 self.run0('checkout', path, wcpath)
1031 self.run0('checkout', path, wcpath)
1032
1032
1033 self.wc = wcpath
1033 self.wc = wcpath
1034 self.opener = util.opener(self.wc)
1034 self.opener = util.opener(self.wc)
1035 self.wopener = util.opener(self.wc)
1035 self.wopener = util.opener(self.wc)
1036 self.childmap = mapfile(ui, self.join('hg-childmap'))
1036 self.childmap = mapfile(ui, self.join('hg-childmap'))
1037 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1037 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1038
1038
1039 if created:
1039 if created:
1040 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1040 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1041 fp = open(hook, 'w')
1041 fp = open(hook, 'w')
1042 fp.write(pre_revprop_change)
1042 fp.write(pre_revprop_change)
1043 fp.close()
1043 fp.close()
1044 util.set_flags(hook, False, True)
1044 util.set_flags(hook, False, True)
1045
1045
1046 xport = transport.SvnRaTransport(url=geturl(path))
1046 xport = transport.SvnRaTransport(url=geturl(path))
1047 self.uuid = svn.ra.get_uuid(xport.ra)
1047 self.uuid = svn.ra.get_uuid(xport.ra)
1048
1048
1049 def wjoin(self, *names):
1049 def wjoin(self, *names):
1050 return os.path.join(self.wc, *names)
1050 return os.path.join(self.wc, *names)
1051
1051
1052 def putfile(self, filename, flags, data):
1052 def putfile(self, filename, flags, data):
1053 if 'l' in flags:
1053 if 'l' in flags:
1054 self.wopener.symlink(data, filename)
1054 self.wopener.symlink(data, filename)
1055 else:
1055 else:
1056 try:
1056 try:
1057 if os.path.islink(self.wjoin(filename)):
1057 if os.path.islink(self.wjoin(filename)):
1058 os.unlink(filename)
1058 os.unlink(filename)
1059 except OSError:
1059 except OSError:
1060 pass
1060 pass
1061 self.wopener(filename, 'w').write(data)
1061 self.wopener(filename, 'w').write(data)
1062
1062
1063 if self.is_exec:
1063 if self.is_exec:
1064 was_exec = self.is_exec(self.wjoin(filename))
1064 was_exec = self.is_exec(self.wjoin(filename))
1065 else:
1065 else:
1066 # On filesystems not supporting execute-bit, there is no way
1066 # On filesystems not supporting execute-bit, there is no way
1067 # to know if it is set but asking subversion. Setting it
1067 # to know if it is set but asking subversion. Setting it
1068 # systematically is just as expensive and much simpler.
1068 # systematically is just as expensive and much simpler.
1069 was_exec = 'x' not in flags
1069 was_exec = 'x' not in flags
1070
1070
1071 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1071 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1072 if was_exec:
1072 if was_exec:
1073 if 'x' not in flags:
1073 if 'x' not in flags:
1074 self.delexec.append(filename)
1074 self.delexec.append(filename)
1075 else:
1075 else:
1076 if 'x' in flags:
1076 if 'x' in flags:
1077 self.setexec.append(filename)
1077 self.setexec.append(filename)
1078
1078
1079 def _copyfile(self, source, dest):
1079 def _copyfile(self, source, dest):
1080 # SVN's copy command pukes if the destination file exists, but
1080 # SVN's copy command pukes if the destination file exists, but
1081 # our copyfile method expects to record a copy that has
1081 # our copyfile method expects to record a copy that has
1082 # already occurred. Cross the semantic gap.
1082 # already occurred. Cross the semantic gap.
1083 wdest = self.wjoin(dest)
1083 wdest = self.wjoin(dest)
1084 exists = os.path.exists(wdest)
1084 exists = os.path.exists(wdest)
1085 if exists:
1085 if exists:
1086 fd, tempname = tempfile.mkstemp(
1086 fd, tempname = tempfile.mkstemp(
1087 prefix='hg-copy-', dir=os.path.dirname(wdest))
1087 prefix='hg-copy-', dir=os.path.dirname(wdest))
1088 os.close(fd)
1088 os.close(fd)
1089 os.unlink(tempname)
1089 os.unlink(tempname)
1090 os.rename(wdest, tempname)
1090 os.rename(wdest, tempname)
1091 try:
1091 try:
1092 self.run0('copy', source, dest)
1092 self.run0('copy', source, dest)
1093 finally:
1093 finally:
1094 if exists:
1094 if exists:
1095 try:
1095 try:
1096 os.unlink(wdest)
1096 os.unlink(wdest)
1097 except OSError:
1097 except OSError:
1098 pass
1098 pass
1099 os.rename(tempname, wdest)
1099 os.rename(tempname, wdest)
1100
1100
1101 def dirs_of(self, files):
1101 def dirs_of(self, files):
1102 dirs = set()
1102 dirs = set()
1103 for f in files:
1103 for f in files:
1104 if os.path.isdir(self.wjoin(f)):
1104 if os.path.isdir(self.wjoin(f)):
1105 dirs.add(f)
1105 dirs.add(f)
1106 for i in strutil.rfindall(f, '/'):
1106 for i in strutil.rfindall(f, '/'):
1107 dirs.add(f[:i])
1107 dirs.add(f[:i])
1108 return dirs
1108 return dirs
1109
1109
1110 def add_dirs(self, files):
1110 def add_dirs(self, files):
1111 add_dirs = [d for d in util.sort(self.dirs_of(files))
1111 add_dirs = [d for d in util.sort(self.dirs_of(files))
1112 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1112 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1113 if add_dirs:
1113 if add_dirs:
1114 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1114 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1115 return add_dirs
1115 return add_dirs
1116
1116
1117 def add_files(self, files):
1117 def add_files(self, files):
1118 if files:
1118 if files:
1119 self.xargs(files, 'add', quiet=True)
1119 self.xargs(files, 'add', quiet=True)
1120 return files
1120 return files
1121
1121
1122 def tidy_dirs(self, names):
1122 def tidy_dirs(self, names):
1123 dirs = util.sort(self.dirs_of(names))
1123 dirs = util.sort(self.dirs_of(names))
1124 dirs.reverse()
1124 dirs.reverse()
1125 deleted = []
1125 deleted = []
1126 for d in dirs:
1126 for d in dirs:
1127 wd = self.wjoin(d)
1127 wd = self.wjoin(d)
1128 if os.listdir(wd) == '.svn':
1128 if os.listdir(wd) == '.svn':
1129 self.run0('delete', d)
1129 self.run0('delete', d)
1130 deleted.append(d)
1130 deleted.append(d)
1131 return deleted
1131 return deleted
1132
1132
1133 def addchild(self, parent, child):
1133 def addchild(self, parent, child):
1134 self.childmap[parent] = child
1134 self.childmap[parent] = child
1135
1135
1136 def revid(self, rev):
1136 def revid(self, rev):
1137 return u"svn:%s@%s" % (self.uuid, rev)
1137 return u"svn:%s@%s" % (self.uuid, rev)
1138
1138
1139 def putcommit(self, files, copies, parents, commit, source):
1139 def putcommit(self, files, copies, parents, commit, source):
1140 # Apply changes to working copy
1140 # Apply changes to working copy
1141 for f, v in files:
1141 for f, v in files:
1142 try:
1142 try:
1143 data = source.getfile(f, v)
1143 data = source.getfile(f, v)
1144 except IOError:
1144 except IOError:
1145 self.delete.append(f)
1145 self.delete.append(f)
1146 else:
1146 else:
1147 e = source.getmode(f, v)
1147 e = source.getmode(f, v)
1148 self.putfile(f, e, data)
1148 self.putfile(f, e, data)
1149 if f in copies:
1149 if f in copies:
1150 self.copies.append([copies[f], f])
1150 self.copies.append([copies[f], f])
1151 files = [f[0] for f in files]
1151 files = [f[0] for f in files]
1152
1152
1153 for parent in parents:
1153 for parent in parents:
1154 try:
1154 try:
1155 return self.revid(self.childmap[parent])
1155 return self.revid(self.childmap[parent])
1156 except KeyError:
1156 except KeyError:
1157 pass
1157 pass
1158 entries = set(self.delete)
1158 entries = set(self.delete)
1159 files = frozenset(files)
1159 files = frozenset(files)
1160 entries.update(self.add_dirs(files.difference(entries)))
1160 entries.update(self.add_dirs(files.difference(entries)))
1161 if self.copies:
1161 if self.copies:
1162 for s, d in self.copies:
1162 for s, d in self.copies:
1163 self._copyfile(s, d)
1163 self._copyfile(s, d)
1164 self.copies = []
1164 self.copies = []
1165 if self.delete:
1165 if self.delete:
1166 self.xargs(self.delete, 'delete')
1166 self.xargs(self.delete, 'delete')
1167 self.delete = []
1167 self.delete = []
1168 entries.update(self.add_files(files.difference(entries)))
1168 entries.update(self.add_files(files.difference(entries)))
1169 entries.update(self.tidy_dirs(entries))
1169 entries.update(self.tidy_dirs(entries))
1170 if self.delexec:
1170 if self.delexec:
1171 self.xargs(self.delexec, 'propdel', 'svn:executable')
1171 self.xargs(self.delexec, 'propdel', 'svn:executable')
1172 self.delexec = []
1172 self.delexec = []
1173 if self.setexec:
1173 if self.setexec:
1174 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1174 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1175 self.setexec = []
1175 self.setexec = []
1176
1176
1177 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1177 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1178 fp = os.fdopen(fd, 'w')
1178 fp = os.fdopen(fd, 'w')
1179 fp.write(commit.desc)
1179 fp.write(commit.desc)
1180 fp.close()
1180 fp.close()
1181 try:
1181 try:
1182 output = self.run0('commit',
1182 output = self.run0('commit',
1183 username=util.shortuser(commit.author),
1183 username=util.shortuser(commit.author),
1184 file=messagefile,
1184 file=messagefile,
1185 encoding='utf-8')
1185 encoding='utf-8')
1186 try:
1186 try:
1187 rev = self.commit_re.search(output).group(1)
1187 rev = self.commit_re.search(output).group(1)
1188 except AttributeError:
1188 except AttributeError:
1189 self.ui.warn(_('unexpected svn output:\n'))
1189 self.ui.warn(_('unexpected svn output:\n'))
1190 self.ui.warn(output)
1190 self.ui.warn(output)
1191 raise util.Abort(_('unable to cope with svn output'))
1191 raise util.Abort(_('unable to cope with svn output'))
1192 if commit.rev:
1192 if commit.rev:
1193 self.run('propset', 'hg:convert-rev', commit.rev,
1193 self.run('propset', 'hg:convert-rev', commit.rev,
1194 revprop=True, revision=rev)
1194 revprop=True, revision=rev)
1195 if commit.branch and commit.branch != 'default':
1195 if commit.branch and commit.branch != 'default':
1196 self.run('propset', 'hg:convert-branch', commit.branch,
1196 self.run('propset', 'hg:convert-branch', commit.branch,
1197 revprop=True, revision=rev)
1197 revprop=True, revision=rev)
1198 for parent in parents:
1198 for parent in parents:
1199 self.addchild(parent, rev)
1199 self.addchild(parent, rev)
1200 return self.revid(rev)
1200 return self.revid(rev)
1201 finally:
1201 finally:
1202 os.unlink(messagefile)
1202 os.unlink(messagefile)
1203
1203
1204 def puttags(self, tags):
1204 def puttags(self, tags):
1205 self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
1205 self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
@@ -1,2613 +1,2613 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial.lock import release
34 from mercurial.lock import release
35 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import commands, cmdutil, hg, patch, util
36 from mercurial import repair, extensions, url, error
36 from mercurial import repair, extensions, url, error
37 import os, sys, re, errno
37 import os, sys, re, errno
38
38
39 commands.norepo += " qclone"
39 commands.norepo += " qclone"
40
40
41 # Patch names looks like unix-file names.
41 # Patch names looks like unix-file names.
42 # They must be joinable with queue directory and result in the patch path.
42 # They must be joinable with queue directory and result in the patch path.
43 normname = util.normpath
43 normname = util.normpath
44
44
45 class statusentry:
45 class statusentry:
46 def __init__(self, rev, name=None):
46 def __init__(self, rev, name=None):
47 if not name:
47 if not name:
48 fields = rev.split(':', 1)
48 fields = rev.split(':', 1)
49 if len(fields) == 2:
49 if len(fields) == 2:
50 self.rev, self.name = fields
50 self.rev, self.name = fields
51 else:
51 else:
52 self.rev, self.name = None, None
52 self.rev, self.name = None, None
53 else:
53 else:
54 self.rev, self.name = rev, name
54 self.rev, self.name = rev, name
55
55
56 def __str__(self):
56 def __str__(self):
57 return self.rev + ':' + self.name
57 return self.rev + ':' + self.name
58
58
59 class patchheader(object):
59 class patchheader(object):
60 def __init__(self, message, comments, user, date, haspatch):
60 def __init__(self, message, comments, user, date, haspatch):
61 self.message = message
61 self.message = message
62 self.comments = comments
62 self.comments = comments
63 self.user = user
63 self.user = user
64 self.date = date
64 self.date = date
65 self.haspatch = haspatch
65 self.haspatch = haspatch
66
66
67 def setuser(self, user):
67 def setuser(self, user):
68 if not self.setheader(['From: ', '# User '], user):
68 if not self.setheader(['From: ', '# User '], user):
69 try:
69 try:
70 patchheaderat = self.comments.index('# HG changeset patch')
70 patchheaderat = self.comments.index('# HG changeset patch')
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
72 except ValueError:
72 except ValueError:
73 self.comments = ['From: ' + user, ''] + self.comments
73 self.comments = ['From: ' + user, ''] + self.comments
74 self.user = user
74 self.user = user
75
75
76 def setdate(self, date):
76 def setdate(self, date):
77 if self.setheader(['# Date '], date):
77 if self.setheader(['# Date '], date):
78 self.date = date
78 self.date = date
79
79
80 def setmessage(self, message):
80 def setmessage(self, message):
81 if self.comments:
81 if self.comments:
82 self._delmsg()
82 self._delmsg()
83 self.message = [message]
83 self.message = [message]
84 self.comments += self.message
84 self.comments += self.message
85
85
86 def setheader(self, prefixes, new):
86 def setheader(self, prefixes, new):
87 '''Update all references to a field in the patch header.
87 '''Update all references to a field in the patch header.
88 If none found, add it email style.'''
88 If none found, add it email style.'''
89 res = False
89 res = False
90 for prefix in prefixes:
90 for prefix in prefixes:
91 for i in xrange(len(self.comments)):
91 for i in xrange(len(self.comments)):
92 if self.comments[i].startswith(prefix):
92 if self.comments[i].startswith(prefix):
93 self.comments[i] = prefix + new
93 self.comments[i] = prefix + new
94 res = True
94 res = True
95 break
95 break
96 return res
96 return res
97
97
98 def __str__(self):
98 def __str__(self):
99 if not self.comments:
99 if not self.comments:
100 return ''
100 return ''
101 return '\n'.join(self.comments) + '\n\n'
101 return '\n'.join(self.comments) + '\n\n'
102
102
103 def _delmsg(self):
103 def _delmsg(self):
104 '''Remove existing message, keeping the rest of the comments fields.
104 '''Remove existing message, keeping the rest of the comments fields.
105 If comments contains 'subject: ', message will prepend
105 If comments contains 'subject: ', message will prepend
106 the field and a blank line.'''
106 the field and a blank line.'''
107 if self.message:
107 if self.message:
108 subj = 'subject: ' + self.message[0].lower()
108 subj = 'subject: ' + self.message[0].lower()
109 for i in xrange(len(self.comments)):
109 for i in xrange(len(self.comments)):
110 if subj == self.comments[i].lower():
110 if subj == self.comments[i].lower():
111 del self.comments[i]
111 del self.comments[i]
112 self.message = self.message[2:]
112 self.message = self.message[2:]
113 break
113 break
114 ci = 0
114 ci = 0
115 for mi in xrange(len(self.message)):
115 for mi in xrange(len(self.message)):
116 while self.message[mi] != self.comments[ci]:
116 while self.message[mi] != self.comments[ci]:
117 ci += 1
117 ci += 1
118 del self.comments[ci]
118 del self.comments[ci]
119
119
120 class queue:
120 class queue:
121 def __init__(self, ui, path, patchdir=None):
121 def __init__(self, ui, path, patchdir=None):
122 self.basepath = path
122 self.basepath = path
123 self.path = patchdir or os.path.join(path, "patches")
123 self.path = patchdir or os.path.join(path, "patches")
124 self.opener = util.opener(self.path)
124 self.opener = util.opener(self.path)
125 self.ui = ui
125 self.ui = ui
126 self.applied = []
126 self.applied = []
127 self.full_series = []
127 self.full_series = []
128 self.applied_dirty = 0
128 self.applied_dirty = 0
129 self.series_dirty = 0
129 self.series_dirty = 0
130 self.series_path = "series"
130 self.series_path = "series"
131 self.status_path = "status"
131 self.status_path = "status"
132 self.guards_path = "guards"
132 self.guards_path = "guards"
133 self.active_guards = None
133 self.active_guards = None
134 self.guards_dirty = False
134 self.guards_dirty = False
135 self._diffopts = None
135 self._diffopts = None
136
136
137 if os.path.exists(self.join(self.series_path)):
137 if os.path.exists(self.join(self.series_path)):
138 self.full_series = self.opener(self.series_path).read().splitlines()
138 self.full_series = self.opener(self.series_path).read().splitlines()
139 self.parse_series()
139 self.parse_series()
140
140
141 if os.path.exists(self.join(self.status_path)):
141 if os.path.exists(self.join(self.status_path)):
142 lines = self.opener(self.status_path).read().splitlines()
142 lines = self.opener(self.status_path).read().splitlines()
143 self.applied = [statusentry(l) for l in lines]
143 self.applied = [statusentry(l) for l in lines]
144
144
145 def diffopts(self):
145 def diffopts(self):
146 if self._diffopts is None:
146 if self._diffopts is None:
147 self._diffopts = patch.diffopts(self.ui)
147 self._diffopts = patch.diffopts(self.ui)
148 return self._diffopts
148 return self._diffopts
149
149
150 def join(self, *p):
150 def join(self, *p):
151 return os.path.join(self.path, *p)
151 return os.path.join(self.path, *p)
152
152
153 def find_series(self, patch):
153 def find_series(self, patch):
154 pre = re.compile("(\s*)([^#]+)")
154 pre = re.compile("(\s*)([^#]+)")
155 index = 0
155 index = 0
156 for l in self.full_series:
156 for l in self.full_series:
157 m = pre.match(l)
157 m = pre.match(l)
158 if m:
158 if m:
159 s = m.group(2)
159 s = m.group(2)
160 s = s.rstrip()
160 s = s.rstrip()
161 if s == patch:
161 if s == patch:
162 return index
162 return index
163 index += 1
163 index += 1
164 return None
164 return None
165
165
166 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
166 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
167
167
168 def parse_series(self):
168 def parse_series(self):
169 self.series = []
169 self.series = []
170 self.series_guards = []
170 self.series_guards = []
171 for l in self.full_series:
171 for l in self.full_series:
172 h = l.find('#')
172 h = l.find('#')
173 if h == -1:
173 if h == -1:
174 patch = l
174 patch = l
175 comment = ''
175 comment = ''
176 elif h == 0:
176 elif h == 0:
177 continue
177 continue
178 else:
178 else:
179 patch = l[:h]
179 patch = l[:h]
180 comment = l[h:]
180 comment = l[h:]
181 patch = patch.strip()
181 patch = patch.strip()
182 if patch:
182 if patch:
183 if patch in self.series:
183 if patch in self.series:
184 raise util.Abort(_('%s appears more than once in %s') %
184 raise util.Abort(_('%s appears more than once in %s') %
185 (patch, self.join(self.series_path)))
185 (patch, self.join(self.series_path)))
186 self.series.append(patch)
186 self.series.append(patch)
187 self.series_guards.append(self.guard_re.findall(comment))
187 self.series_guards.append(self.guard_re.findall(comment))
188
188
189 def check_guard(self, guard):
189 def check_guard(self, guard):
190 if not guard:
190 if not guard:
191 return _('guard cannot be an empty string')
191 return _('guard cannot be an empty string')
192 bad_chars = '# \t\r\n\f'
192 bad_chars = '# \t\r\n\f'
193 first = guard[0]
193 first = guard[0]
194 for c in '-+':
194 for c in '-+':
195 if first == c:
195 if first == c:
196 return (_('guard %r starts with invalid character: %r') %
196 return (_('guard %r starts with invalid character: %r') %
197 (guard, c))
197 (guard, c))
198 for c in bad_chars:
198 for c in bad_chars:
199 if c in guard:
199 if c in guard:
200 return _('invalid character in guard %r: %r') % (guard, c)
200 return _('invalid character in guard %r: %r') % (guard, c)
201
201
202 def set_active(self, guards):
202 def set_active(self, guards):
203 for guard in guards:
203 for guard in guards:
204 bad = self.check_guard(guard)
204 bad = self.check_guard(guard)
205 if bad:
205 if bad:
206 raise util.Abort(bad)
206 raise util.Abort(bad)
207 guards = util.sort(util.unique(guards))
207 guards = util.sort(set(guards))
208 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
208 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
209 self.active_guards = guards
209 self.active_guards = guards
210 self.guards_dirty = True
210 self.guards_dirty = True
211
211
212 def active(self):
212 def active(self):
213 if self.active_guards is None:
213 if self.active_guards is None:
214 self.active_guards = []
214 self.active_guards = []
215 try:
215 try:
216 guards = self.opener(self.guards_path).read().split()
216 guards = self.opener(self.guards_path).read().split()
217 except IOError, err:
217 except IOError, err:
218 if err.errno != errno.ENOENT: raise
218 if err.errno != errno.ENOENT: raise
219 guards = []
219 guards = []
220 for i, guard in enumerate(guards):
220 for i, guard in enumerate(guards):
221 bad = self.check_guard(guard)
221 bad = self.check_guard(guard)
222 if bad:
222 if bad:
223 self.ui.warn('%s:%d: %s\n' %
223 self.ui.warn('%s:%d: %s\n' %
224 (self.join(self.guards_path), i + 1, bad))
224 (self.join(self.guards_path), i + 1, bad))
225 else:
225 else:
226 self.active_guards.append(guard)
226 self.active_guards.append(guard)
227 return self.active_guards
227 return self.active_guards
228
228
229 def set_guards(self, idx, guards):
229 def set_guards(self, idx, guards):
230 for g in guards:
230 for g in guards:
231 if len(g) < 2:
231 if len(g) < 2:
232 raise util.Abort(_('guard %r too short') % g)
232 raise util.Abort(_('guard %r too short') % g)
233 if g[0] not in '-+':
233 if g[0] not in '-+':
234 raise util.Abort(_('guard %r starts with invalid char') % g)
234 raise util.Abort(_('guard %r starts with invalid char') % g)
235 bad = self.check_guard(g[1:])
235 bad = self.check_guard(g[1:])
236 if bad:
236 if bad:
237 raise util.Abort(bad)
237 raise util.Abort(bad)
238 drop = self.guard_re.sub('', self.full_series[idx])
238 drop = self.guard_re.sub('', self.full_series[idx])
239 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
239 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
240 self.parse_series()
240 self.parse_series()
241 self.series_dirty = True
241 self.series_dirty = True
242
242
243 def pushable(self, idx):
243 def pushable(self, idx):
244 if isinstance(idx, str):
244 if isinstance(idx, str):
245 idx = self.series.index(idx)
245 idx = self.series.index(idx)
246 patchguards = self.series_guards[idx]
246 patchguards = self.series_guards[idx]
247 if not patchguards:
247 if not patchguards:
248 return True, None
248 return True, None
249 guards = self.active()
249 guards = self.active()
250 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
250 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
251 if exactneg:
251 if exactneg:
252 return False, exactneg[0]
252 return False, exactneg[0]
253 pos = [g for g in patchguards if g[0] == '+']
253 pos = [g for g in patchguards if g[0] == '+']
254 exactpos = [g for g in pos if g[1:] in guards]
254 exactpos = [g for g in pos if g[1:] in guards]
255 if pos:
255 if pos:
256 if exactpos:
256 if exactpos:
257 return True, exactpos[0]
257 return True, exactpos[0]
258 return False, pos
258 return False, pos
259 return True, ''
259 return True, ''
260
260
261 def explain_pushable(self, idx, all_patches=False):
261 def explain_pushable(self, idx, all_patches=False):
262 write = all_patches and self.ui.write or self.ui.warn
262 write = all_patches and self.ui.write or self.ui.warn
263 if all_patches or self.ui.verbose:
263 if all_patches or self.ui.verbose:
264 if isinstance(idx, str):
264 if isinstance(idx, str):
265 idx = self.series.index(idx)
265 idx = self.series.index(idx)
266 pushable, why = self.pushable(idx)
266 pushable, why = self.pushable(idx)
267 if all_patches and pushable:
267 if all_patches and pushable:
268 if why is None:
268 if why is None:
269 write(_('allowing %s - no guards in effect\n') %
269 write(_('allowing %s - no guards in effect\n') %
270 self.series[idx])
270 self.series[idx])
271 else:
271 else:
272 if not why:
272 if not why:
273 write(_('allowing %s - no matching negative guards\n') %
273 write(_('allowing %s - no matching negative guards\n') %
274 self.series[idx])
274 self.series[idx])
275 else:
275 else:
276 write(_('allowing %s - guarded by %r\n') %
276 write(_('allowing %s - guarded by %r\n') %
277 (self.series[idx], why))
277 (self.series[idx], why))
278 if not pushable:
278 if not pushable:
279 if why:
279 if why:
280 write(_('skipping %s - guarded by %r\n') %
280 write(_('skipping %s - guarded by %r\n') %
281 (self.series[idx], why))
281 (self.series[idx], why))
282 else:
282 else:
283 write(_('skipping %s - no matching guards\n') %
283 write(_('skipping %s - no matching guards\n') %
284 self.series[idx])
284 self.series[idx])
285
285
286 def save_dirty(self):
286 def save_dirty(self):
287 def write_list(items, path):
287 def write_list(items, path):
288 fp = self.opener(path, 'w')
288 fp = self.opener(path, 'w')
289 for i in items:
289 for i in items:
290 fp.write("%s\n" % i)
290 fp.write("%s\n" % i)
291 fp.close()
291 fp.close()
292 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
292 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
293 if self.series_dirty: write_list(self.full_series, self.series_path)
293 if self.series_dirty: write_list(self.full_series, self.series_path)
294 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
294 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
295
295
296 def readheaders(self, patch):
296 def readheaders(self, patch):
297 def eatdiff(lines):
297 def eatdiff(lines):
298 while lines:
298 while lines:
299 l = lines[-1]
299 l = lines[-1]
300 if (l.startswith("diff -") or
300 if (l.startswith("diff -") or
301 l.startswith("Index:") or
301 l.startswith("Index:") or
302 l.startswith("===========")):
302 l.startswith("===========")):
303 del lines[-1]
303 del lines[-1]
304 else:
304 else:
305 break
305 break
306 def eatempty(lines):
306 def eatempty(lines):
307 while lines:
307 while lines:
308 l = lines[-1]
308 l = lines[-1]
309 if re.match('\s*$', l):
309 if re.match('\s*$', l):
310 del lines[-1]
310 del lines[-1]
311 else:
311 else:
312 break
312 break
313
313
314 pf = self.join(patch)
314 pf = self.join(patch)
315 message = []
315 message = []
316 comments = []
316 comments = []
317 user = None
317 user = None
318 date = None
318 date = None
319 format = None
319 format = None
320 subject = None
320 subject = None
321 diffstart = 0
321 diffstart = 0
322
322
323 for line in file(pf):
323 for line in file(pf):
324 line = line.rstrip()
324 line = line.rstrip()
325 if line.startswith('diff --git'):
325 if line.startswith('diff --git'):
326 diffstart = 2
326 diffstart = 2
327 break
327 break
328 if diffstart:
328 if diffstart:
329 if line.startswith('+++ '):
329 if line.startswith('+++ '):
330 diffstart = 2
330 diffstart = 2
331 break
331 break
332 if line.startswith("--- "):
332 if line.startswith("--- "):
333 diffstart = 1
333 diffstart = 1
334 continue
334 continue
335 elif format == "hgpatch":
335 elif format == "hgpatch":
336 # parse values when importing the result of an hg export
336 # parse values when importing the result of an hg export
337 if line.startswith("# User "):
337 if line.startswith("# User "):
338 user = line[7:]
338 user = line[7:]
339 elif line.startswith("# Date "):
339 elif line.startswith("# Date "):
340 date = line[7:]
340 date = line[7:]
341 elif not line.startswith("# ") and line:
341 elif not line.startswith("# ") and line:
342 message.append(line)
342 message.append(line)
343 format = None
343 format = None
344 elif line == '# HG changeset patch':
344 elif line == '# HG changeset patch':
345 format = "hgpatch"
345 format = "hgpatch"
346 elif (format != "tagdone" and (line.startswith("Subject: ") or
346 elif (format != "tagdone" and (line.startswith("Subject: ") or
347 line.startswith("subject: "))):
347 line.startswith("subject: "))):
348 subject = line[9:]
348 subject = line[9:]
349 format = "tag"
349 format = "tag"
350 elif (format != "tagdone" and (line.startswith("From: ") or
350 elif (format != "tagdone" and (line.startswith("From: ") or
351 line.startswith("from: "))):
351 line.startswith("from: "))):
352 user = line[6:]
352 user = line[6:]
353 format = "tag"
353 format = "tag"
354 elif format == "tag" and line == "":
354 elif format == "tag" and line == "":
355 # when looking for tags (subject: from: etc) they
355 # when looking for tags (subject: from: etc) they
356 # end once you find a blank line in the source
356 # end once you find a blank line in the source
357 format = "tagdone"
357 format = "tagdone"
358 elif message or line:
358 elif message or line:
359 message.append(line)
359 message.append(line)
360 comments.append(line)
360 comments.append(line)
361
361
362 eatdiff(message)
362 eatdiff(message)
363 eatdiff(comments)
363 eatdiff(comments)
364 eatempty(message)
364 eatempty(message)
365 eatempty(comments)
365 eatempty(comments)
366
366
367 # make sure message isn't empty
367 # make sure message isn't empty
368 if format and format.startswith("tag") and subject:
368 if format and format.startswith("tag") and subject:
369 message.insert(0, "")
369 message.insert(0, "")
370 message.insert(0, subject)
370 message.insert(0, subject)
371 return patchheader(message, comments, user, date, diffstart > 1)
371 return patchheader(message, comments, user, date, diffstart > 1)
372
372
373 def removeundo(self, repo):
373 def removeundo(self, repo):
374 undo = repo.sjoin('undo')
374 undo = repo.sjoin('undo')
375 if not os.path.exists(undo):
375 if not os.path.exists(undo):
376 return
376 return
377 try:
377 try:
378 os.unlink(undo)
378 os.unlink(undo)
379 except OSError, inst:
379 except OSError, inst:
380 self.ui.warn(_('error removing undo: %s\n') % str(inst))
380 self.ui.warn(_('error removing undo: %s\n') % str(inst))
381
381
382 def printdiff(self, repo, node1, node2=None, files=None,
382 def printdiff(self, repo, node1, node2=None, files=None,
383 fp=None, changes=None, opts={}):
383 fp=None, changes=None, opts={}):
384 m = cmdutil.match(repo, files, opts)
384 m = cmdutil.match(repo, files, opts)
385 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
385 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
386 write = fp is None and repo.ui.write or fp.write
386 write = fp is None and repo.ui.write or fp.write
387 for chunk in chunks:
387 for chunk in chunks:
388 write(chunk)
388 write(chunk)
389
389
390 def mergeone(self, repo, mergeq, head, patch, rev):
390 def mergeone(self, repo, mergeq, head, patch, rev):
391 # first try just applying the patch
391 # first try just applying the patch
392 (err, n) = self.apply(repo, [ patch ], update_status=False,
392 (err, n) = self.apply(repo, [ patch ], update_status=False,
393 strict=True, merge=rev)
393 strict=True, merge=rev)
394
394
395 if err == 0:
395 if err == 0:
396 return (err, n)
396 return (err, n)
397
397
398 if n is None:
398 if n is None:
399 raise util.Abort(_("apply failed for patch %s") % patch)
399 raise util.Abort(_("apply failed for patch %s") % patch)
400
400
401 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
401 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
402
402
403 # apply failed, strip away that rev and merge.
403 # apply failed, strip away that rev and merge.
404 hg.clean(repo, head)
404 hg.clean(repo, head)
405 self.strip(repo, n, update=False, backup='strip')
405 self.strip(repo, n, update=False, backup='strip')
406
406
407 ctx = repo[rev]
407 ctx = repo[rev]
408 ret = hg.merge(repo, rev)
408 ret = hg.merge(repo, rev)
409 if ret:
409 if ret:
410 raise util.Abort(_("update returned %d") % ret)
410 raise util.Abort(_("update returned %d") % ret)
411 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
411 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
412 if n == None:
412 if n == None:
413 raise util.Abort(_("repo commit failed"))
413 raise util.Abort(_("repo commit failed"))
414 try:
414 try:
415 ph = mergeq.readheaders(patch)
415 ph = mergeq.readheaders(patch)
416 except:
416 except:
417 raise util.Abort(_("unable to read %s") % patch)
417 raise util.Abort(_("unable to read %s") % patch)
418
418
419 patchf = self.opener(patch, "w")
419 patchf = self.opener(patch, "w")
420 comments = str(ph)
420 comments = str(ph)
421 if comments:
421 if comments:
422 patchf.write(comments)
422 patchf.write(comments)
423 self.printdiff(repo, head, n, fp=patchf)
423 self.printdiff(repo, head, n, fp=patchf)
424 patchf.close()
424 patchf.close()
425 self.removeundo(repo)
425 self.removeundo(repo)
426 return (0, n)
426 return (0, n)
427
427
428 def qparents(self, repo, rev=None):
428 def qparents(self, repo, rev=None):
429 if rev is None:
429 if rev is None:
430 (p1, p2) = repo.dirstate.parents()
430 (p1, p2) = repo.dirstate.parents()
431 if p2 == nullid:
431 if p2 == nullid:
432 return p1
432 return p1
433 if len(self.applied) == 0:
433 if len(self.applied) == 0:
434 return None
434 return None
435 return bin(self.applied[-1].rev)
435 return bin(self.applied[-1].rev)
436 pp = repo.changelog.parents(rev)
436 pp = repo.changelog.parents(rev)
437 if pp[1] != nullid:
437 if pp[1] != nullid:
438 arevs = [ x.rev for x in self.applied ]
438 arevs = [ x.rev for x in self.applied ]
439 p0 = hex(pp[0])
439 p0 = hex(pp[0])
440 p1 = hex(pp[1])
440 p1 = hex(pp[1])
441 if p0 in arevs:
441 if p0 in arevs:
442 return pp[0]
442 return pp[0]
443 if p1 in arevs:
443 if p1 in arevs:
444 return pp[1]
444 return pp[1]
445 return pp[0]
445 return pp[0]
446
446
447 def mergepatch(self, repo, mergeq, series):
447 def mergepatch(self, repo, mergeq, series):
448 if len(self.applied) == 0:
448 if len(self.applied) == 0:
449 # each of the patches merged in will have two parents. This
449 # each of the patches merged in will have two parents. This
450 # can confuse the qrefresh, qdiff, and strip code because it
450 # can confuse the qrefresh, qdiff, and strip code because it
451 # needs to know which parent is actually in the patch queue.
451 # needs to know which parent is actually in the patch queue.
452 # so, we insert a merge marker with only one parent. This way
452 # so, we insert a merge marker with only one parent. This way
453 # the first patch in the queue is never a merge patch
453 # the first patch in the queue is never a merge patch
454 #
454 #
455 pname = ".hg.patches.merge.marker"
455 pname = ".hg.patches.merge.marker"
456 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
456 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
457 self.removeundo(repo)
457 self.removeundo(repo)
458 self.applied.append(statusentry(hex(n), pname))
458 self.applied.append(statusentry(hex(n), pname))
459 self.applied_dirty = 1
459 self.applied_dirty = 1
460
460
461 head = self.qparents(repo)
461 head = self.qparents(repo)
462
462
463 for patch in series:
463 for patch in series:
464 patch = mergeq.lookup(patch, strict=True)
464 patch = mergeq.lookup(patch, strict=True)
465 if not patch:
465 if not patch:
466 self.ui.warn(_("patch %s does not exist\n") % patch)
466 self.ui.warn(_("patch %s does not exist\n") % patch)
467 return (1, None)
467 return (1, None)
468 pushable, reason = self.pushable(patch)
468 pushable, reason = self.pushable(patch)
469 if not pushable:
469 if not pushable:
470 self.explain_pushable(patch, all_patches=True)
470 self.explain_pushable(patch, all_patches=True)
471 continue
471 continue
472 info = mergeq.isapplied(patch)
472 info = mergeq.isapplied(patch)
473 if not info:
473 if not info:
474 self.ui.warn(_("patch %s is not applied\n") % patch)
474 self.ui.warn(_("patch %s is not applied\n") % patch)
475 return (1, None)
475 return (1, None)
476 rev = bin(info[1])
476 rev = bin(info[1])
477 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
477 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
478 if head:
478 if head:
479 self.applied.append(statusentry(hex(head), patch))
479 self.applied.append(statusentry(hex(head), patch))
480 self.applied_dirty = 1
480 self.applied_dirty = 1
481 if err:
481 if err:
482 return (err, head)
482 return (err, head)
483 self.save_dirty()
483 self.save_dirty()
484 return (0, head)
484 return (0, head)
485
485
486 def patch(self, repo, patchfile):
486 def patch(self, repo, patchfile):
487 '''Apply patchfile to the working directory.
487 '''Apply patchfile to the working directory.
488 patchfile: file name of patch'''
488 patchfile: file name of patch'''
489 files = {}
489 files = {}
490 try:
490 try:
491 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
491 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
492 files=files)
492 files=files)
493 except Exception, inst:
493 except Exception, inst:
494 self.ui.note(str(inst) + '\n')
494 self.ui.note(str(inst) + '\n')
495 if not self.ui.verbose:
495 if not self.ui.verbose:
496 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
496 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
497 return (False, files, False)
497 return (False, files, False)
498
498
499 return (True, files, fuzz)
499 return (True, files, fuzz)
500
500
501 def apply(self, repo, series, list=False, update_status=True,
501 def apply(self, repo, series, list=False, update_status=True,
502 strict=False, patchdir=None, merge=None, all_files={}):
502 strict=False, patchdir=None, merge=None, all_files={}):
503 wlock = lock = tr = None
503 wlock = lock = tr = None
504 try:
504 try:
505 wlock = repo.wlock()
505 wlock = repo.wlock()
506 lock = repo.lock()
506 lock = repo.lock()
507 tr = repo.transaction()
507 tr = repo.transaction()
508 try:
508 try:
509 ret = self._apply(repo, series, list, update_status,
509 ret = self._apply(repo, series, list, update_status,
510 strict, patchdir, merge, all_files=all_files)
510 strict, patchdir, merge, all_files=all_files)
511 tr.close()
511 tr.close()
512 self.save_dirty()
512 self.save_dirty()
513 return ret
513 return ret
514 except:
514 except:
515 try:
515 try:
516 tr.abort()
516 tr.abort()
517 finally:
517 finally:
518 repo.invalidate()
518 repo.invalidate()
519 repo.dirstate.invalidate()
519 repo.dirstate.invalidate()
520 raise
520 raise
521 finally:
521 finally:
522 del tr
522 del tr
523 release(lock, wlock)
523 release(lock, wlock)
524 self.removeundo(repo)
524 self.removeundo(repo)
525
525
526 def _apply(self, repo, series, list=False, update_status=True,
526 def _apply(self, repo, series, list=False, update_status=True,
527 strict=False, patchdir=None, merge=None, all_files={}):
527 strict=False, patchdir=None, merge=None, all_files={}):
528 # TODO unify with commands.py
528 # TODO unify with commands.py
529 if not patchdir:
529 if not patchdir:
530 patchdir = self.path
530 patchdir = self.path
531 err = 0
531 err = 0
532 n = None
532 n = None
533 for patchname in series:
533 for patchname in series:
534 pushable, reason = self.pushable(patchname)
534 pushable, reason = self.pushable(patchname)
535 if not pushable:
535 if not pushable:
536 self.explain_pushable(patchname, all_patches=True)
536 self.explain_pushable(patchname, all_patches=True)
537 continue
537 continue
538 self.ui.warn(_("applying %s\n") % patchname)
538 self.ui.warn(_("applying %s\n") % patchname)
539 pf = os.path.join(patchdir, patchname)
539 pf = os.path.join(patchdir, patchname)
540
540
541 try:
541 try:
542 ph = self.readheaders(patchname)
542 ph = self.readheaders(patchname)
543 except:
543 except:
544 self.ui.warn(_("Unable to read %s\n") % patchname)
544 self.ui.warn(_("Unable to read %s\n") % patchname)
545 err = 1
545 err = 1
546 break
546 break
547
547
548 message = ph.message
548 message = ph.message
549 if not message:
549 if not message:
550 message = _("imported patch %s\n") % patchname
550 message = _("imported patch %s\n") % patchname
551 else:
551 else:
552 if list:
552 if list:
553 message.append(_("\nimported patch %s") % patchname)
553 message.append(_("\nimported patch %s") % patchname)
554 message = '\n'.join(message)
554 message = '\n'.join(message)
555
555
556 if ph.haspatch:
556 if ph.haspatch:
557 (patcherr, files, fuzz) = self.patch(repo, pf)
557 (patcherr, files, fuzz) = self.patch(repo, pf)
558 all_files.update(files)
558 all_files.update(files)
559 patcherr = not patcherr
559 patcherr = not patcherr
560 else:
560 else:
561 self.ui.warn(_("patch %s is empty\n") % patchname)
561 self.ui.warn(_("patch %s is empty\n") % patchname)
562 patcherr, files, fuzz = 0, [], 0
562 patcherr, files, fuzz = 0, [], 0
563
563
564 if merge and files:
564 if merge and files:
565 # Mark as removed/merged and update dirstate parent info
565 # Mark as removed/merged and update dirstate parent info
566 removed = []
566 removed = []
567 merged = []
567 merged = []
568 for f in files:
568 for f in files:
569 if os.path.exists(repo.wjoin(f)):
569 if os.path.exists(repo.wjoin(f)):
570 merged.append(f)
570 merged.append(f)
571 else:
571 else:
572 removed.append(f)
572 removed.append(f)
573 for f in removed:
573 for f in removed:
574 repo.dirstate.remove(f)
574 repo.dirstate.remove(f)
575 for f in merged:
575 for f in merged:
576 repo.dirstate.merge(f)
576 repo.dirstate.merge(f)
577 p1, p2 = repo.dirstate.parents()
577 p1, p2 = repo.dirstate.parents()
578 repo.dirstate.setparents(p1, merge)
578 repo.dirstate.setparents(p1, merge)
579
579
580 files = patch.updatedir(self.ui, repo, files)
580 files = patch.updatedir(self.ui, repo, files)
581 match = cmdutil.matchfiles(repo, files or [])
581 match = cmdutil.matchfiles(repo, files or [])
582 n = repo.commit(files, message, ph.user, ph.date, match=match,
582 n = repo.commit(files, message, ph.user, ph.date, match=match,
583 force=True)
583 force=True)
584
584
585 if n == None:
585 if n == None:
586 raise util.Abort(_("repo commit failed"))
586 raise util.Abort(_("repo commit failed"))
587
587
588 if update_status:
588 if update_status:
589 self.applied.append(statusentry(hex(n), patchname))
589 self.applied.append(statusentry(hex(n), patchname))
590
590
591 if patcherr:
591 if patcherr:
592 self.ui.warn(_("patch failed, rejects left in working dir\n"))
592 self.ui.warn(_("patch failed, rejects left in working dir\n"))
593 err = 1
593 err = 1
594 break
594 break
595
595
596 if fuzz and strict:
596 if fuzz and strict:
597 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
597 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
598 err = 1
598 err = 1
599 break
599 break
600 return (err, n)
600 return (err, n)
601
601
602 def _clean_series(self, patches):
602 def _clean_series(self, patches):
603 indices = util.sort([self.find_series(p) for p in patches])
603 indices = util.sort([self.find_series(p) for p in patches])
604 for i in indices[-1::-1]:
604 for i in indices[-1::-1]:
605 del self.full_series[i]
605 del self.full_series[i]
606 self.parse_series()
606 self.parse_series()
607 self.series_dirty = 1
607 self.series_dirty = 1
608
608
609 def finish(self, repo, revs):
609 def finish(self, repo, revs):
610 revs.sort()
610 revs.sort()
611 firstrev = repo[self.applied[0].rev].rev()
611 firstrev = repo[self.applied[0].rev].rev()
612 appliedbase = 0
612 appliedbase = 0
613 patches = []
613 patches = []
614 for rev in util.sort(revs):
614 for rev in util.sort(revs):
615 if rev < firstrev:
615 if rev < firstrev:
616 raise util.Abort(_('revision %d is not managed') % rev)
616 raise util.Abort(_('revision %d is not managed') % rev)
617 base = bin(self.applied[appliedbase].rev)
617 base = bin(self.applied[appliedbase].rev)
618 node = repo.changelog.node(rev)
618 node = repo.changelog.node(rev)
619 if node != base:
619 if node != base:
620 raise util.Abort(_('cannot delete revision %d above '
620 raise util.Abort(_('cannot delete revision %d above '
621 'applied patches') % rev)
621 'applied patches') % rev)
622 patches.append(self.applied[appliedbase].name)
622 patches.append(self.applied[appliedbase].name)
623 appliedbase += 1
623 appliedbase += 1
624
624
625 r = self.qrepo()
625 r = self.qrepo()
626 if r:
626 if r:
627 r.remove(patches, True)
627 r.remove(patches, True)
628 else:
628 else:
629 for p in patches:
629 for p in patches:
630 os.unlink(self.join(p))
630 os.unlink(self.join(p))
631
631
632 del self.applied[:appliedbase]
632 del self.applied[:appliedbase]
633 self.applied_dirty = 1
633 self.applied_dirty = 1
634 self._clean_series(patches)
634 self._clean_series(patches)
635
635
636 def delete(self, repo, patches, opts):
636 def delete(self, repo, patches, opts):
637 if not patches and not opts.get('rev'):
637 if not patches and not opts.get('rev'):
638 raise util.Abort(_('qdelete requires at least one revision or '
638 raise util.Abort(_('qdelete requires at least one revision or '
639 'patch name'))
639 'patch name'))
640
640
641 realpatches = []
641 realpatches = []
642 for patch in patches:
642 for patch in patches:
643 patch = self.lookup(patch, strict=True)
643 patch = self.lookup(patch, strict=True)
644 info = self.isapplied(patch)
644 info = self.isapplied(patch)
645 if info:
645 if info:
646 raise util.Abort(_("cannot delete applied patch %s") % patch)
646 raise util.Abort(_("cannot delete applied patch %s") % patch)
647 if patch not in self.series:
647 if patch not in self.series:
648 raise util.Abort(_("patch %s not in series file") % patch)
648 raise util.Abort(_("patch %s not in series file") % patch)
649 realpatches.append(patch)
649 realpatches.append(patch)
650
650
651 appliedbase = 0
651 appliedbase = 0
652 if opts.get('rev'):
652 if opts.get('rev'):
653 if not self.applied:
653 if not self.applied:
654 raise util.Abort(_('no patches applied'))
654 raise util.Abort(_('no patches applied'))
655 revs = cmdutil.revrange(repo, opts['rev'])
655 revs = cmdutil.revrange(repo, opts['rev'])
656 if len(revs) > 1 and revs[0] > revs[1]:
656 if len(revs) > 1 and revs[0] > revs[1]:
657 revs.reverse()
657 revs.reverse()
658 for rev in revs:
658 for rev in revs:
659 if appliedbase >= len(self.applied):
659 if appliedbase >= len(self.applied):
660 raise util.Abort(_("revision %d is not managed") % rev)
660 raise util.Abort(_("revision %d is not managed") % rev)
661
661
662 base = bin(self.applied[appliedbase].rev)
662 base = bin(self.applied[appliedbase].rev)
663 node = repo.changelog.node(rev)
663 node = repo.changelog.node(rev)
664 if node != base:
664 if node != base:
665 raise util.Abort(_("cannot delete revision %d above "
665 raise util.Abort(_("cannot delete revision %d above "
666 "applied patches") % rev)
666 "applied patches") % rev)
667 realpatches.append(self.applied[appliedbase].name)
667 realpatches.append(self.applied[appliedbase].name)
668 appliedbase += 1
668 appliedbase += 1
669
669
670 if not opts.get('keep'):
670 if not opts.get('keep'):
671 r = self.qrepo()
671 r = self.qrepo()
672 if r:
672 if r:
673 r.remove(realpatches, True)
673 r.remove(realpatches, True)
674 else:
674 else:
675 for p in realpatches:
675 for p in realpatches:
676 os.unlink(self.join(p))
676 os.unlink(self.join(p))
677
677
678 if appliedbase:
678 if appliedbase:
679 del self.applied[:appliedbase]
679 del self.applied[:appliedbase]
680 self.applied_dirty = 1
680 self.applied_dirty = 1
681 self._clean_series(realpatches)
681 self._clean_series(realpatches)
682
682
683 def check_toppatch(self, repo):
683 def check_toppatch(self, repo):
684 if len(self.applied) > 0:
684 if len(self.applied) > 0:
685 top = bin(self.applied[-1].rev)
685 top = bin(self.applied[-1].rev)
686 pp = repo.dirstate.parents()
686 pp = repo.dirstate.parents()
687 if top not in pp:
687 if top not in pp:
688 raise util.Abort(_("working directory revision is not qtip"))
688 raise util.Abort(_("working directory revision is not qtip"))
689 return top
689 return top
690 return None
690 return None
691 def check_localchanges(self, repo, force=False, refresh=True):
691 def check_localchanges(self, repo, force=False, refresh=True):
692 m, a, r, d = repo.status()[:4]
692 m, a, r, d = repo.status()[:4]
693 if m or a or r or d:
693 if m or a or r or d:
694 if not force:
694 if not force:
695 if refresh:
695 if refresh:
696 raise util.Abort(_("local changes found, refresh first"))
696 raise util.Abort(_("local changes found, refresh first"))
697 else:
697 else:
698 raise util.Abort(_("local changes found"))
698 raise util.Abort(_("local changes found"))
699 return m, a, r, d
699 return m, a, r, d
700
700
701 _reserved = ('series', 'status', 'guards')
701 _reserved = ('series', 'status', 'guards')
702 def check_reserved_name(self, name):
702 def check_reserved_name(self, name):
703 if (name in self._reserved or name.startswith('.hg')
703 if (name in self._reserved or name.startswith('.hg')
704 or name.startswith('.mq')):
704 or name.startswith('.mq')):
705 raise util.Abort(_('"%s" cannot be used as the name of a patch')
705 raise util.Abort(_('"%s" cannot be used as the name of a patch')
706 % name)
706 % name)
707
707
708 def new(self, repo, patchfn, *pats, **opts):
708 def new(self, repo, patchfn, *pats, **opts):
709 """options:
709 """options:
710 msg: a string or a no-argument function returning a string
710 msg: a string or a no-argument function returning a string
711 """
711 """
712 msg = opts.get('msg')
712 msg = opts.get('msg')
713 force = opts.get('force')
713 force = opts.get('force')
714 user = opts.get('user')
714 user = opts.get('user')
715 date = opts.get('date')
715 date = opts.get('date')
716 if date:
716 if date:
717 date = util.parsedate(date)
717 date = util.parsedate(date)
718 self.check_reserved_name(patchfn)
718 self.check_reserved_name(patchfn)
719 if os.path.exists(self.join(patchfn)):
719 if os.path.exists(self.join(patchfn)):
720 raise util.Abort(_('patch "%s" already exists') % patchfn)
720 raise util.Abort(_('patch "%s" already exists') % patchfn)
721 if opts.get('include') or opts.get('exclude') or pats:
721 if opts.get('include') or opts.get('exclude') or pats:
722 match = cmdutil.match(repo, pats, opts)
722 match = cmdutil.match(repo, pats, opts)
723 # detect missing files in pats
723 # detect missing files in pats
724 def badfn(f, msg):
724 def badfn(f, msg):
725 raise util.Abort('%s: %s' % (f, msg))
725 raise util.Abort('%s: %s' % (f, msg))
726 match.bad = badfn
726 match.bad = badfn
727 m, a, r, d = repo.status(match=match)[:4]
727 m, a, r, d = repo.status(match=match)[:4]
728 else:
728 else:
729 m, a, r, d = self.check_localchanges(repo, force)
729 m, a, r, d = self.check_localchanges(repo, force)
730 match = cmdutil.matchfiles(repo, m + a + r)
730 match = cmdutil.matchfiles(repo, m + a + r)
731 commitfiles = m + a + r
731 commitfiles = m + a + r
732 self.check_toppatch(repo)
732 self.check_toppatch(repo)
733 insert = self.full_series_end()
733 insert = self.full_series_end()
734 wlock = repo.wlock()
734 wlock = repo.wlock()
735 try:
735 try:
736 # if patch file write fails, abort early
736 # if patch file write fails, abort early
737 p = self.opener(patchfn, "w")
737 p = self.opener(patchfn, "w")
738 try:
738 try:
739 if date:
739 if date:
740 p.write("# HG changeset patch\n")
740 p.write("# HG changeset patch\n")
741 if user:
741 if user:
742 p.write("# User " + user + "\n")
742 p.write("# User " + user + "\n")
743 p.write("# Date %d %d\n\n" % date)
743 p.write("# Date %d %d\n\n" % date)
744 elif user:
744 elif user:
745 p.write("From: " + user + "\n\n")
745 p.write("From: " + user + "\n\n")
746
746
747 if callable(msg):
747 if callable(msg):
748 msg = msg()
748 msg = msg()
749 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
749 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
750 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
750 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
751 if n == None:
751 if n == None:
752 raise util.Abort(_("repo commit failed"))
752 raise util.Abort(_("repo commit failed"))
753 try:
753 try:
754 self.full_series[insert:insert] = [patchfn]
754 self.full_series[insert:insert] = [patchfn]
755 self.applied.append(statusentry(hex(n), patchfn))
755 self.applied.append(statusentry(hex(n), patchfn))
756 self.parse_series()
756 self.parse_series()
757 self.series_dirty = 1
757 self.series_dirty = 1
758 self.applied_dirty = 1
758 self.applied_dirty = 1
759 if msg:
759 if msg:
760 msg = msg + "\n\n"
760 msg = msg + "\n\n"
761 p.write(msg)
761 p.write(msg)
762 if commitfiles:
762 if commitfiles:
763 diffopts = self.diffopts()
763 diffopts = self.diffopts()
764 if opts.get('git'): diffopts.git = True
764 if opts.get('git'): diffopts.git = True
765 parent = self.qparents(repo, n)
765 parent = self.qparents(repo, n)
766 chunks = patch.diff(repo, node1=parent, node2=n,
766 chunks = patch.diff(repo, node1=parent, node2=n,
767 match=match, opts=diffopts)
767 match=match, opts=diffopts)
768 for chunk in chunks:
768 for chunk in chunks:
769 p.write(chunk)
769 p.write(chunk)
770 p.close()
770 p.close()
771 wlock.release()
771 wlock.release()
772 wlock = None
772 wlock = None
773 r = self.qrepo()
773 r = self.qrepo()
774 if r: r.add([patchfn])
774 if r: r.add([patchfn])
775 except:
775 except:
776 repo.rollback()
776 repo.rollback()
777 raise
777 raise
778 except Exception:
778 except Exception:
779 patchpath = self.join(patchfn)
779 patchpath = self.join(patchfn)
780 try:
780 try:
781 os.unlink(patchpath)
781 os.unlink(patchpath)
782 except:
782 except:
783 self.ui.warn(_('error unlinking %s\n') % patchpath)
783 self.ui.warn(_('error unlinking %s\n') % patchpath)
784 raise
784 raise
785 self.removeundo(repo)
785 self.removeundo(repo)
786 finally:
786 finally:
787 release(wlock)
787 release(wlock)
788
788
789 def strip(self, repo, rev, update=True, backup="all", force=None):
789 def strip(self, repo, rev, update=True, backup="all", force=None):
790 wlock = lock = None
790 wlock = lock = None
791 try:
791 try:
792 wlock = repo.wlock()
792 wlock = repo.wlock()
793 lock = repo.lock()
793 lock = repo.lock()
794
794
795 if update:
795 if update:
796 self.check_localchanges(repo, force=force, refresh=False)
796 self.check_localchanges(repo, force=force, refresh=False)
797 urev = self.qparents(repo, rev)
797 urev = self.qparents(repo, rev)
798 hg.clean(repo, urev)
798 hg.clean(repo, urev)
799 repo.dirstate.write()
799 repo.dirstate.write()
800
800
801 self.removeundo(repo)
801 self.removeundo(repo)
802 repair.strip(self.ui, repo, rev, backup)
802 repair.strip(self.ui, repo, rev, backup)
803 # strip may have unbundled a set of backed up revisions after
803 # strip may have unbundled a set of backed up revisions after
804 # the actual strip
804 # the actual strip
805 self.removeundo(repo)
805 self.removeundo(repo)
806 finally:
806 finally:
807 release(lock, wlock)
807 release(lock, wlock)
808
808
809 def isapplied(self, patch):
809 def isapplied(self, patch):
810 """returns (index, rev, patch)"""
810 """returns (index, rev, patch)"""
811 for i in xrange(len(self.applied)):
811 for i in xrange(len(self.applied)):
812 a = self.applied[i]
812 a = self.applied[i]
813 if a.name == patch:
813 if a.name == patch:
814 return (i, a.rev, a.name)
814 return (i, a.rev, a.name)
815 return None
815 return None
816
816
817 # if the exact patch name does not exist, we try a few
817 # if the exact patch name does not exist, we try a few
818 # variations. If strict is passed, we try only #1
818 # variations. If strict is passed, we try only #1
819 #
819 #
820 # 1) a number to indicate an offset in the series file
820 # 1) a number to indicate an offset in the series file
821 # 2) a unique substring of the patch name was given
821 # 2) a unique substring of the patch name was given
822 # 3) patchname[-+]num to indicate an offset in the series file
822 # 3) patchname[-+]num to indicate an offset in the series file
823 def lookup(self, patch, strict=False):
823 def lookup(self, patch, strict=False):
824 patch = patch and str(patch)
824 patch = patch and str(patch)
825
825
826 def partial_name(s):
826 def partial_name(s):
827 if s in self.series:
827 if s in self.series:
828 return s
828 return s
829 matches = [x for x in self.series if s in x]
829 matches = [x for x in self.series if s in x]
830 if len(matches) > 1:
830 if len(matches) > 1:
831 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
831 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
832 for m in matches:
832 for m in matches:
833 self.ui.warn(' %s\n' % m)
833 self.ui.warn(' %s\n' % m)
834 return None
834 return None
835 if matches:
835 if matches:
836 return matches[0]
836 return matches[0]
837 if len(self.series) > 0 and len(self.applied) > 0:
837 if len(self.series) > 0 and len(self.applied) > 0:
838 if s == 'qtip':
838 if s == 'qtip':
839 return self.series[self.series_end(True)-1]
839 return self.series[self.series_end(True)-1]
840 if s == 'qbase':
840 if s == 'qbase':
841 return self.series[0]
841 return self.series[0]
842 return None
842 return None
843
843
844 if patch == None:
844 if patch == None:
845 return None
845 return None
846 if patch in self.series:
846 if patch in self.series:
847 return patch
847 return patch
848
848
849 if not os.path.isfile(self.join(patch)):
849 if not os.path.isfile(self.join(patch)):
850 try:
850 try:
851 sno = int(patch)
851 sno = int(patch)
852 except(ValueError, OverflowError):
852 except(ValueError, OverflowError):
853 pass
853 pass
854 else:
854 else:
855 if -len(self.series) <= sno < len(self.series):
855 if -len(self.series) <= sno < len(self.series):
856 return self.series[sno]
856 return self.series[sno]
857
857
858 if not strict:
858 if not strict:
859 res = partial_name(patch)
859 res = partial_name(patch)
860 if res:
860 if res:
861 return res
861 return res
862 minus = patch.rfind('-')
862 minus = patch.rfind('-')
863 if minus >= 0:
863 if minus >= 0:
864 res = partial_name(patch[:minus])
864 res = partial_name(patch[:minus])
865 if res:
865 if res:
866 i = self.series.index(res)
866 i = self.series.index(res)
867 try:
867 try:
868 off = int(patch[minus+1:] or 1)
868 off = int(patch[minus+1:] or 1)
869 except(ValueError, OverflowError):
869 except(ValueError, OverflowError):
870 pass
870 pass
871 else:
871 else:
872 if i - off >= 0:
872 if i - off >= 0:
873 return self.series[i - off]
873 return self.series[i - off]
874 plus = patch.rfind('+')
874 plus = patch.rfind('+')
875 if plus >= 0:
875 if plus >= 0:
876 res = partial_name(patch[:plus])
876 res = partial_name(patch[:plus])
877 if res:
877 if res:
878 i = self.series.index(res)
878 i = self.series.index(res)
879 try:
879 try:
880 off = int(patch[plus+1:] or 1)
880 off = int(patch[plus+1:] or 1)
881 except(ValueError, OverflowError):
881 except(ValueError, OverflowError):
882 pass
882 pass
883 else:
883 else:
884 if i + off < len(self.series):
884 if i + off < len(self.series):
885 return self.series[i + off]
885 return self.series[i + off]
886 raise util.Abort(_("patch %s not in series") % patch)
886 raise util.Abort(_("patch %s not in series") % patch)
887
887
888 def push(self, repo, patch=None, force=False, list=False,
888 def push(self, repo, patch=None, force=False, list=False,
889 mergeq=None, all=False):
889 mergeq=None, all=False):
890 wlock = repo.wlock()
890 wlock = repo.wlock()
891 if repo.dirstate.parents()[0] != repo.changelog.tip():
891 if repo.dirstate.parents()[0] != repo.changelog.tip():
892 self.ui.status(_("(working directory not at tip)\n"))
892 self.ui.status(_("(working directory not at tip)\n"))
893
893
894 if not self.series:
894 if not self.series:
895 self.ui.warn(_('no patches in series\n'))
895 self.ui.warn(_('no patches in series\n'))
896 return 0
896 return 0
897
897
898 try:
898 try:
899 patch = self.lookup(patch)
899 patch = self.lookup(patch)
900 # Suppose our series file is: A B C and the current 'top'
900 # Suppose our series file is: A B C and the current 'top'
901 # patch is B. qpush C should be performed (moving forward)
901 # patch is B. qpush C should be performed (moving forward)
902 # qpush B is a NOP (no change) qpush A is an error (can't
902 # qpush B is a NOP (no change) qpush A is an error (can't
903 # go backwards with qpush)
903 # go backwards with qpush)
904 if patch:
904 if patch:
905 info = self.isapplied(patch)
905 info = self.isapplied(patch)
906 if info:
906 if info:
907 if info[0] < len(self.applied) - 1:
907 if info[0] < len(self.applied) - 1:
908 raise util.Abort(
908 raise util.Abort(
909 _("cannot push to a previous patch: %s") % patch)
909 _("cannot push to a previous patch: %s") % patch)
910 self.ui.warn(
910 self.ui.warn(
911 _('qpush: %s is already at the top\n') % patch)
911 _('qpush: %s is already at the top\n') % patch)
912 return
912 return
913 pushable, reason = self.pushable(patch)
913 pushable, reason = self.pushable(patch)
914 if not pushable:
914 if not pushable:
915 if reason:
915 if reason:
916 reason = _('guarded by %r') % reason
916 reason = _('guarded by %r') % reason
917 else:
917 else:
918 reason = _('no matching guards')
918 reason = _('no matching guards')
919 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
919 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
920 return 1
920 return 1
921 elif all:
921 elif all:
922 patch = self.series[-1]
922 patch = self.series[-1]
923 if self.isapplied(patch):
923 if self.isapplied(patch):
924 self.ui.warn(_('all patches are currently applied\n'))
924 self.ui.warn(_('all patches are currently applied\n'))
925 return 0
925 return 0
926
926
927 # Following the above example, starting at 'top' of B:
927 # Following the above example, starting at 'top' of B:
928 # qpush should be performed (pushes C), but a subsequent
928 # qpush should be performed (pushes C), but a subsequent
929 # qpush without an argument is an error (nothing to
929 # qpush without an argument is an error (nothing to
930 # apply). This allows a loop of "...while hg qpush..." to
930 # apply). This allows a loop of "...while hg qpush..." to
931 # work as it detects an error when done
931 # work as it detects an error when done
932 start = self.series_end()
932 start = self.series_end()
933 if start == len(self.series):
933 if start == len(self.series):
934 self.ui.warn(_('patch series already fully applied\n'))
934 self.ui.warn(_('patch series already fully applied\n'))
935 return 1
935 return 1
936 if not force:
936 if not force:
937 self.check_localchanges(repo)
937 self.check_localchanges(repo)
938
938
939 self.applied_dirty = 1
939 self.applied_dirty = 1
940 if start > 0:
940 if start > 0:
941 self.check_toppatch(repo)
941 self.check_toppatch(repo)
942 if not patch:
942 if not patch:
943 patch = self.series[start]
943 patch = self.series[start]
944 end = start + 1
944 end = start + 1
945 else:
945 else:
946 end = self.series.index(patch, start) + 1
946 end = self.series.index(patch, start) + 1
947 s = self.series[start:end]
947 s = self.series[start:end]
948 all_files = {}
948 all_files = {}
949 try:
949 try:
950 if mergeq:
950 if mergeq:
951 ret = self.mergepatch(repo, mergeq, s)
951 ret = self.mergepatch(repo, mergeq, s)
952 else:
952 else:
953 ret = self.apply(repo, s, list, all_files=all_files)
953 ret = self.apply(repo, s, list, all_files=all_files)
954 except:
954 except:
955 self.ui.warn(_('cleaning up working directory...'))
955 self.ui.warn(_('cleaning up working directory...'))
956 node = repo.dirstate.parents()[0]
956 node = repo.dirstate.parents()[0]
957 hg.revert(repo, node, None)
957 hg.revert(repo, node, None)
958 unknown = repo.status(unknown=True)[4]
958 unknown = repo.status(unknown=True)[4]
959 # only remove unknown files that we know we touched or
959 # only remove unknown files that we know we touched or
960 # created while patching
960 # created while patching
961 for f in unknown:
961 for f in unknown:
962 if f in all_files:
962 if f in all_files:
963 util.unlink(repo.wjoin(f))
963 util.unlink(repo.wjoin(f))
964 self.ui.warn(_('done\n'))
964 self.ui.warn(_('done\n'))
965 raise
965 raise
966 top = self.applied[-1].name
966 top = self.applied[-1].name
967 if ret[0]:
967 if ret[0]:
968 self.ui.write(_("errors during apply, please fix and "
968 self.ui.write(_("errors during apply, please fix and "
969 "refresh %s\n") % top)
969 "refresh %s\n") % top)
970 else:
970 else:
971 self.ui.write(_("now at: %s\n") % top)
971 self.ui.write(_("now at: %s\n") % top)
972 return ret[0]
972 return ret[0]
973 finally:
973 finally:
974 wlock.release()
974 wlock.release()
975
975
976 def pop(self, repo, patch=None, force=False, update=True, all=False):
976 def pop(self, repo, patch=None, force=False, update=True, all=False):
977 def getfile(f, rev, flags):
977 def getfile(f, rev, flags):
978 t = repo.file(f).read(rev)
978 t = repo.file(f).read(rev)
979 repo.wwrite(f, t, flags)
979 repo.wwrite(f, t, flags)
980
980
981 wlock = repo.wlock()
981 wlock = repo.wlock()
982 try:
982 try:
983 if patch:
983 if patch:
984 # index, rev, patch
984 # index, rev, patch
985 info = self.isapplied(patch)
985 info = self.isapplied(patch)
986 if not info:
986 if not info:
987 patch = self.lookup(patch)
987 patch = self.lookup(patch)
988 info = self.isapplied(patch)
988 info = self.isapplied(patch)
989 if not info:
989 if not info:
990 raise util.Abort(_("patch %s is not applied") % patch)
990 raise util.Abort(_("patch %s is not applied") % patch)
991
991
992 if len(self.applied) == 0:
992 if len(self.applied) == 0:
993 # Allow qpop -a to work repeatedly,
993 # Allow qpop -a to work repeatedly,
994 # but not qpop without an argument
994 # but not qpop without an argument
995 self.ui.warn(_("no patches applied\n"))
995 self.ui.warn(_("no patches applied\n"))
996 return not all
996 return not all
997
997
998 if all:
998 if all:
999 start = 0
999 start = 0
1000 elif patch:
1000 elif patch:
1001 start = info[0] + 1
1001 start = info[0] + 1
1002 else:
1002 else:
1003 start = len(self.applied) - 1
1003 start = len(self.applied) - 1
1004
1004
1005 if start >= len(self.applied):
1005 if start >= len(self.applied):
1006 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1006 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1007 return
1007 return
1008
1008
1009 if not update:
1009 if not update:
1010 parents = repo.dirstate.parents()
1010 parents = repo.dirstate.parents()
1011 rr = [ bin(x.rev) for x in self.applied ]
1011 rr = [ bin(x.rev) for x in self.applied ]
1012 for p in parents:
1012 for p in parents:
1013 if p in rr:
1013 if p in rr:
1014 self.ui.warn(_("qpop: forcing dirstate update\n"))
1014 self.ui.warn(_("qpop: forcing dirstate update\n"))
1015 update = True
1015 update = True
1016 else:
1016 else:
1017 parents = [p.hex() for p in repo[None].parents()]
1017 parents = [p.hex() for p in repo[None].parents()]
1018 needupdate = False
1018 needupdate = False
1019 for entry in self.applied[start:]:
1019 for entry in self.applied[start:]:
1020 if entry.rev in parents:
1020 if entry.rev in parents:
1021 needupdate = True
1021 needupdate = True
1022 break
1022 break
1023 update = needupdate
1023 update = needupdate
1024
1024
1025 if not force and update:
1025 if not force and update:
1026 self.check_localchanges(repo)
1026 self.check_localchanges(repo)
1027
1027
1028 self.applied_dirty = 1
1028 self.applied_dirty = 1
1029 end = len(self.applied)
1029 end = len(self.applied)
1030 rev = bin(self.applied[start].rev)
1030 rev = bin(self.applied[start].rev)
1031 if update:
1031 if update:
1032 top = self.check_toppatch(repo)
1032 top = self.check_toppatch(repo)
1033
1033
1034 try:
1034 try:
1035 heads = repo.changelog.heads(rev)
1035 heads = repo.changelog.heads(rev)
1036 except error.LookupError:
1036 except error.LookupError:
1037 node = short(rev)
1037 node = short(rev)
1038 raise util.Abort(_('trying to pop unknown node %s') % node)
1038 raise util.Abort(_('trying to pop unknown node %s') % node)
1039
1039
1040 if heads != [bin(self.applied[-1].rev)]:
1040 if heads != [bin(self.applied[-1].rev)]:
1041 raise util.Abort(_("popping would remove a revision not "
1041 raise util.Abort(_("popping would remove a revision not "
1042 "managed by this patch queue"))
1042 "managed by this patch queue"))
1043
1043
1044 # we know there are no local changes, so we can make a simplified
1044 # we know there are no local changes, so we can make a simplified
1045 # form of hg.update.
1045 # form of hg.update.
1046 if update:
1046 if update:
1047 qp = self.qparents(repo, rev)
1047 qp = self.qparents(repo, rev)
1048 changes = repo.changelog.read(qp)
1048 changes = repo.changelog.read(qp)
1049 mmap = repo.manifest.read(changes[0])
1049 mmap = repo.manifest.read(changes[0])
1050 m, a, r, d = repo.status(qp, top)[:4]
1050 m, a, r, d = repo.status(qp, top)[:4]
1051 if d:
1051 if d:
1052 raise util.Abort(_("deletions found between repo revs"))
1052 raise util.Abort(_("deletions found between repo revs"))
1053 for f in m:
1053 for f in m:
1054 getfile(f, mmap[f], mmap.flags(f))
1054 getfile(f, mmap[f], mmap.flags(f))
1055 for f in r:
1055 for f in r:
1056 getfile(f, mmap[f], mmap.flags(f))
1056 getfile(f, mmap[f], mmap.flags(f))
1057 for f in m + r:
1057 for f in m + r:
1058 repo.dirstate.normal(f)
1058 repo.dirstate.normal(f)
1059 for f in a:
1059 for f in a:
1060 try:
1060 try:
1061 os.unlink(repo.wjoin(f))
1061 os.unlink(repo.wjoin(f))
1062 except OSError, e:
1062 except OSError, e:
1063 if e.errno != errno.ENOENT:
1063 if e.errno != errno.ENOENT:
1064 raise
1064 raise
1065 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1065 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1066 except: pass
1066 except: pass
1067 repo.dirstate.forget(f)
1067 repo.dirstate.forget(f)
1068 repo.dirstate.setparents(qp, nullid)
1068 repo.dirstate.setparents(qp, nullid)
1069 del self.applied[start:end]
1069 del self.applied[start:end]
1070 self.strip(repo, rev, update=False, backup='strip')
1070 self.strip(repo, rev, update=False, backup='strip')
1071 if len(self.applied):
1071 if len(self.applied):
1072 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1072 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1073 else:
1073 else:
1074 self.ui.write(_("patch queue now empty\n"))
1074 self.ui.write(_("patch queue now empty\n"))
1075 finally:
1075 finally:
1076 wlock.release()
1076 wlock.release()
1077
1077
1078 def diff(self, repo, pats, opts):
1078 def diff(self, repo, pats, opts):
1079 top = self.check_toppatch(repo)
1079 top = self.check_toppatch(repo)
1080 if not top:
1080 if not top:
1081 self.ui.write(_("no patches applied\n"))
1081 self.ui.write(_("no patches applied\n"))
1082 return
1082 return
1083 qp = self.qparents(repo, top)
1083 qp = self.qparents(repo, top)
1084 self._diffopts = patch.diffopts(self.ui, opts)
1084 self._diffopts = patch.diffopts(self.ui, opts)
1085 self.printdiff(repo, qp, files=pats, opts=opts)
1085 self.printdiff(repo, qp, files=pats, opts=opts)
1086
1086
1087 def refresh(self, repo, pats=None, **opts):
1087 def refresh(self, repo, pats=None, **opts):
1088 if len(self.applied) == 0:
1088 if len(self.applied) == 0:
1089 self.ui.write(_("no patches applied\n"))
1089 self.ui.write(_("no patches applied\n"))
1090 return 1
1090 return 1
1091 msg = opts.get('msg', '').rstrip()
1091 msg = opts.get('msg', '').rstrip()
1092 newuser = opts.get('user')
1092 newuser = opts.get('user')
1093 newdate = opts.get('date')
1093 newdate = opts.get('date')
1094 if newdate:
1094 if newdate:
1095 newdate = '%d %d' % util.parsedate(newdate)
1095 newdate = '%d %d' % util.parsedate(newdate)
1096 wlock = repo.wlock()
1096 wlock = repo.wlock()
1097 try:
1097 try:
1098 self.check_toppatch(repo)
1098 self.check_toppatch(repo)
1099 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1099 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1100 top = bin(top)
1100 top = bin(top)
1101 if repo.changelog.heads(top) != [top]:
1101 if repo.changelog.heads(top) != [top]:
1102 raise util.Abort(_("cannot refresh a revision with children"))
1102 raise util.Abort(_("cannot refresh a revision with children"))
1103 cparents = repo.changelog.parents(top)
1103 cparents = repo.changelog.parents(top)
1104 patchparent = self.qparents(repo, top)
1104 patchparent = self.qparents(repo, top)
1105 ph = self.readheaders(patchfn)
1105 ph = self.readheaders(patchfn)
1106
1106
1107 patchf = self.opener(patchfn, 'r')
1107 patchf = self.opener(patchfn, 'r')
1108
1108
1109 # if the patch was a git patch, refresh it as a git patch
1109 # if the patch was a git patch, refresh it as a git patch
1110 for line in patchf:
1110 for line in patchf:
1111 if line.startswith('diff --git'):
1111 if line.startswith('diff --git'):
1112 self.diffopts().git = True
1112 self.diffopts().git = True
1113 break
1113 break
1114
1114
1115 if msg:
1115 if msg:
1116 ph.setmessage(msg)
1116 ph.setmessage(msg)
1117 if newuser:
1117 if newuser:
1118 ph.setuser(newuser)
1118 ph.setuser(newuser)
1119 if newdate:
1119 if newdate:
1120 ph.setdate(newdate)
1120 ph.setdate(newdate)
1121
1121
1122 # only commit new patch when write is complete
1122 # only commit new patch when write is complete
1123 patchf = self.opener(patchfn, 'w', atomictemp=True)
1123 patchf = self.opener(patchfn, 'w', atomictemp=True)
1124
1124
1125 patchf.seek(0)
1125 patchf.seek(0)
1126 patchf.truncate()
1126 patchf.truncate()
1127
1127
1128 comments = str(ph)
1128 comments = str(ph)
1129 if comments:
1129 if comments:
1130 patchf.write(comments)
1130 patchf.write(comments)
1131
1131
1132 if opts.get('git'):
1132 if opts.get('git'):
1133 self.diffopts().git = True
1133 self.diffopts().git = True
1134 tip = repo.changelog.tip()
1134 tip = repo.changelog.tip()
1135 if top == tip:
1135 if top == tip:
1136 # if the top of our patch queue is also the tip, there is an
1136 # if the top of our patch queue is also the tip, there is an
1137 # optimization here. We update the dirstate in place and strip
1137 # optimization here. We update the dirstate in place and strip
1138 # off the tip commit. Then just commit the current directory
1138 # off the tip commit. Then just commit the current directory
1139 # tree. We can also send repo.commit the list of files
1139 # tree. We can also send repo.commit the list of files
1140 # changed to speed up the diff
1140 # changed to speed up the diff
1141 #
1141 #
1142 # in short mode, we only diff the files included in the
1142 # in short mode, we only diff the files included in the
1143 # patch already plus specified files
1143 # patch already plus specified files
1144 #
1144 #
1145 # this should really read:
1145 # this should really read:
1146 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1146 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1147 # but we do it backwards to take advantage of manifest/chlog
1147 # but we do it backwards to take advantage of manifest/chlog
1148 # caching against the next repo.status call
1148 # caching against the next repo.status call
1149 #
1149 #
1150 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1150 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1151 changes = repo.changelog.read(tip)
1151 changes = repo.changelog.read(tip)
1152 man = repo.manifest.read(changes[0])
1152 man = repo.manifest.read(changes[0])
1153 aaa = aa[:]
1153 aaa = aa[:]
1154 matchfn = cmdutil.match(repo, pats, opts)
1154 matchfn = cmdutil.match(repo, pats, opts)
1155 if opts.get('short'):
1155 if opts.get('short'):
1156 # if amending a patch, we start with existing
1156 # if amending a patch, we start with existing
1157 # files plus specified files - unfiltered
1157 # files plus specified files - unfiltered
1158 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1158 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1159 # filter with inc/exl options
1159 # filter with inc/exl options
1160 matchfn = cmdutil.match(repo, opts=opts)
1160 matchfn = cmdutil.match(repo, opts=opts)
1161 else:
1161 else:
1162 match = cmdutil.matchall(repo)
1162 match = cmdutil.matchall(repo)
1163 m, a, r, d = repo.status(match=match)[:4]
1163 m, a, r, d = repo.status(match=match)[:4]
1164
1164
1165 # we might end up with files that were added between
1165 # we might end up with files that were added between
1166 # tip and the dirstate parent, but then changed in the
1166 # tip and the dirstate parent, but then changed in the
1167 # local dirstate. in this case, we want them to only
1167 # local dirstate. in this case, we want them to only
1168 # show up in the added section
1168 # show up in the added section
1169 for x in m:
1169 for x in m:
1170 if x not in aa:
1170 if x not in aa:
1171 mm.append(x)
1171 mm.append(x)
1172 # we might end up with files added by the local dirstate that
1172 # we might end up with files added by the local dirstate that
1173 # were deleted by the patch. In this case, they should only
1173 # were deleted by the patch. In this case, they should only
1174 # show up in the changed section.
1174 # show up in the changed section.
1175 for x in a:
1175 for x in a:
1176 if x in dd:
1176 if x in dd:
1177 del dd[dd.index(x)]
1177 del dd[dd.index(x)]
1178 mm.append(x)
1178 mm.append(x)
1179 else:
1179 else:
1180 aa.append(x)
1180 aa.append(x)
1181 # make sure any files deleted in the local dirstate
1181 # make sure any files deleted in the local dirstate
1182 # are not in the add or change column of the patch
1182 # are not in the add or change column of the patch
1183 forget = []
1183 forget = []
1184 for x in d + r:
1184 for x in d + r:
1185 if x in aa:
1185 if x in aa:
1186 del aa[aa.index(x)]
1186 del aa[aa.index(x)]
1187 forget.append(x)
1187 forget.append(x)
1188 continue
1188 continue
1189 elif x in mm:
1189 elif x in mm:
1190 del mm[mm.index(x)]
1190 del mm[mm.index(x)]
1191 dd.append(x)
1191 dd.append(x)
1192
1192
1193 m = util.unique(mm)
1193 m = list(set(mm))
1194 r = util.unique(dd)
1194 r = list(set(dd))
1195 a = util.unique(aa)
1195 a = list(set(aa))
1196 c = [filter(matchfn, l) for l in (m, a, r)]
1196 c = [filter(matchfn, l) for l in (m, a, r)]
1197 match = cmdutil.matchfiles(repo, util.unique(c[0] + c[1] + c[2]))
1197 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1198 chunks = patch.diff(repo, patchparent, match=match,
1198 chunks = patch.diff(repo, patchparent, match=match,
1199 changes=c, opts=self.diffopts())
1199 changes=c, opts=self.diffopts())
1200 for chunk in chunks:
1200 for chunk in chunks:
1201 patchf.write(chunk)
1201 patchf.write(chunk)
1202
1202
1203 try:
1203 try:
1204 if self.diffopts().git:
1204 if self.diffopts().git:
1205 copies = {}
1205 copies = {}
1206 for dst in a:
1206 for dst in a:
1207 src = repo.dirstate.copied(dst)
1207 src = repo.dirstate.copied(dst)
1208 # during qfold, the source file for copies may
1208 # during qfold, the source file for copies may
1209 # be removed. Treat this as a simple add.
1209 # be removed. Treat this as a simple add.
1210 if src is not None and src in repo.dirstate:
1210 if src is not None and src in repo.dirstate:
1211 copies.setdefault(src, []).append(dst)
1211 copies.setdefault(src, []).append(dst)
1212 repo.dirstate.add(dst)
1212 repo.dirstate.add(dst)
1213 # remember the copies between patchparent and tip
1213 # remember the copies between patchparent and tip
1214 for dst in aaa:
1214 for dst in aaa:
1215 f = repo.file(dst)
1215 f = repo.file(dst)
1216 src = f.renamed(man[dst])
1216 src = f.renamed(man[dst])
1217 if src:
1217 if src:
1218 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1218 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1219 if dst in a:
1219 if dst in a:
1220 copies[src[0]].append(dst)
1220 copies[src[0]].append(dst)
1221 # we can't copy a file created by the patch itself
1221 # we can't copy a file created by the patch itself
1222 if dst in copies:
1222 if dst in copies:
1223 del copies[dst]
1223 del copies[dst]
1224 for src, dsts in copies.iteritems():
1224 for src, dsts in copies.iteritems():
1225 for dst in dsts:
1225 for dst in dsts:
1226 repo.dirstate.copy(src, dst)
1226 repo.dirstate.copy(src, dst)
1227 else:
1227 else:
1228 for dst in a:
1228 for dst in a:
1229 repo.dirstate.add(dst)
1229 repo.dirstate.add(dst)
1230 # Drop useless copy information
1230 # Drop useless copy information
1231 for f in list(repo.dirstate.copies()):
1231 for f in list(repo.dirstate.copies()):
1232 repo.dirstate.copy(None, f)
1232 repo.dirstate.copy(None, f)
1233 for f in r:
1233 for f in r:
1234 repo.dirstate.remove(f)
1234 repo.dirstate.remove(f)
1235 # if the patch excludes a modified file, mark that
1235 # if the patch excludes a modified file, mark that
1236 # file with mtime=0 so status can see it.
1236 # file with mtime=0 so status can see it.
1237 mm = []
1237 mm = []
1238 for i in xrange(len(m)-1, -1, -1):
1238 for i in xrange(len(m)-1, -1, -1):
1239 if not matchfn(m[i]):
1239 if not matchfn(m[i]):
1240 mm.append(m[i])
1240 mm.append(m[i])
1241 del m[i]
1241 del m[i]
1242 for f in m:
1242 for f in m:
1243 repo.dirstate.normal(f)
1243 repo.dirstate.normal(f)
1244 for f in mm:
1244 for f in mm:
1245 repo.dirstate.normallookup(f)
1245 repo.dirstate.normallookup(f)
1246 for f in forget:
1246 for f in forget:
1247 repo.dirstate.forget(f)
1247 repo.dirstate.forget(f)
1248
1248
1249 if not msg:
1249 if not msg:
1250 if not ph.message:
1250 if not ph.message:
1251 message = "[mq]: %s\n" % patchfn
1251 message = "[mq]: %s\n" % patchfn
1252 else:
1252 else:
1253 message = "\n".join(ph.message)
1253 message = "\n".join(ph.message)
1254 else:
1254 else:
1255 message = msg
1255 message = msg
1256
1256
1257 user = ph.user or changes[1]
1257 user = ph.user or changes[1]
1258
1258
1259 # assumes strip can roll itself back if interrupted
1259 # assumes strip can roll itself back if interrupted
1260 repo.dirstate.setparents(*cparents)
1260 repo.dirstate.setparents(*cparents)
1261 self.applied.pop()
1261 self.applied.pop()
1262 self.applied_dirty = 1
1262 self.applied_dirty = 1
1263 self.strip(repo, top, update=False,
1263 self.strip(repo, top, update=False,
1264 backup='strip')
1264 backup='strip')
1265 except:
1265 except:
1266 repo.dirstate.invalidate()
1266 repo.dirstate.invalidate()
1267 raise
1267 raise
1268
1268
1269 try:
1269 try:
1270 # might be nice to attempt to roll back strip after this
1270 # might be nice to attempt to roll back strip after this
1271 patchf.rename()
1271 patchf.rename()
1272 n = repo.commit(match.files(), message, user, ph.date,
1272 n = repo.commit(match.files(), message, user, ph.date,
1273 match=match, force=1)
1273 match=match, force=1)
1274 self.applied.append(statusentry(hex(n), patchfn))
1274 self.applied.append(statusentry(hex(n), patchfn))
1275 except:
1275 except:
1276 ctx = repo[cparents[0]]
1276 ctx = repo[cparents[0]]
1277 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1277 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1278 self.save_dirty()
1278 self.save_dirty()
1279 self.ui.warn(_('refresh interrupted while patch was popped! '
1279 self.ui.warn(_('refresh interrupted while patch was popped! '
1280 '(revert --all, qpush to recover)\n'))
1280 '(revert --all, qpush to recover)\n'))
1281 raise
1281 raise
1282 else:
1282 else:
1283 self.printdiff(repo, patchparent, fp=patchf)
1283 self.printdiff(repo, patchparent, fp=patchf)
1284 patchf.rename()
1284 patchf.rename()
1285 added = repo.status()[1]
1285 added = repo.status()[1]
1286 for a in added:
1286 for a in added:
1287 f = repo.wjoin(a)
1287 f = repo.wjoin(a)
1288 try:
1288 try:
1289 os.unlink(f)
1289 os.unlink(f)
1290 except OSError, e:
1290 except OSError, e:
1291 if e.errno != errno.ENOENT:
1291 if e.errno != errno.ENOENT:
1292 raise
1292 raise
1293 try: os.removedirs(os.path.dirname(f))
1293 try: os.removedirs(os.path.dirname(f))
1294 except: pass
1294 except: pass
1295 # forget the file copies in the dirstate
1295 # forget the file copies in the dirstate
1296 # push should readd the files later on
1296 # push should readd the files later on
1297 repo.dirstate.forget(a)
1297 repo.dirstate.forget(a)
1298 self.pop(repo, force=True)
1298 self.pop(repo, force=True)
1299 self.push(repo, force=True)
1299 self.push(repo, force=True)
1300 finally:
1300 finally:
1301 wlock.release()
1301 wlock.release()
1302 self.removeundo(repo)
1302 self.removeundo(repo)
1303
1303
1304 def init(self, repo, create=False):
1304 def init(self, repo, create=False):
1305 if not create and os.path.isdir(self.path):
1305 if not create and os.path.isdir(self.path):
1306 raise util.Abort(_("patch queue directory already exists"))
1306 raise util.Abort(_("patch queue directory already exists"))
1307 try:
1307 try:
1308 os.mkdir(self.path)
1308 os.mkdir(self.path)
1309 except OSError, inst:
1309 except OSError, inst:
1310 if inst.errno != errno.EEXIST or not create:
1310 if inst.errno != errno.EEXIST or not create:
1311 raise
1311 raise
1312 if create:
1312 if create:
1313 return self.qrepo(create=True)
1313 return self.qrepo(create=True)
1314
1314
1315 def unapplied(self, repo, patch=None):
1315 def unapplied(self, repo, patch=None):
1316 if patch and patch not in self.series:
1316 if patch and patch not in self.series:
1317 raise util.Abort(_("patch %s is not in series file") % patch)
1317 raise util.Abort(_("patch %s is not in series file") % patch)
1318 if not patch:
1318 if not patch:
1319 start = self.series_end()
1319 start = self.series_end()
1320 else:
1320 else:
1321 start = self.series.index(patch) + 1
1321 start = self.series.index(patch) + 1
1322 unapplied = []
1322 unapplied = []
1323 for i in xrange(start, len(self.series)):
1323 for i in xrange(start, len(self.series)):
1324 pushable, reason = self.pushable(i)
1324 pushable, reason = self.pushable(i)
1325 if pushable:
1325 if pushable:
1326 unapplied.append((i, self.series[i]))
1326 unapplied.append((i, self.series[i]))
1327 self.explain_pushable(i)
1327 self.explain_pushable(i)
1328 return unapplied
1328 return unapplied
1329
1329
1330 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1330 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1331 summary=False):
1331 summary=False):
1332 def displayname(patchname):
1332 def displayname(patchname):
1333 if summary:
1333 if summary:
1334 ph = self.readheaders(patchname)
1334 ph = self.readheaders(patchname)
1335 msg = ph.message
1335 msg = ph.message
1336 msg = msg and ': ' + msg[0] or ': '
1336 msg = msg and ': ' + msg[0] or ': '
1337 else:
1337 else:
1338 msg = ''
1338 msg = ''
1339 return '%s%s' % (patchname, msg)
1339 return '%s%s' % (patchname, msg)
1340
1340
1341 applied = dict.fromkeys([p.name for p in self.applied])
1341 applied = dict.fromkeys([p.name for p in self.applied])
1342 if length is None:
1342 if length is None:
1343 length = len(self.series) - start
1343 length = len(self.series) - start
1344 if not missing:
1344 if not missing:
1345 for i in xrange(start, start+length):
1345 for i in xrange(start, start+length):
1346 patch = self.series[i]
1346 patch = self.series[i]
1347 if patch in applied:
1347 if patch in applied:
1348 stat = 'A'
1348 stat = 'A'
1349 elif self.pushable(i)[0]:
1349 elif self.pushable(i)[0]:
1350 stat = 'U'
1350 stat = 'U'
1351 else:
1351 else:
1352 stat = 'G'
1352 stat = 'G'
1353 pfx = ''
1353 pfx = ''
1354 if self.ui.verbose:
1354 if self.ui.verbose:
1355 pfx = '%d %s ' % (i, stat)
1355 pfx = '%d %s ' % (i, stat)
1356 elif status and status != stat:
1356 elif status and status != stat:
1357 continue
1357 continue
1358 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1358 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1359 else:
1359 else:
1360 msng_list = []
1360 msng_list = []
1361 for root, dirs, files in os.walk(self.path):
1361 for root, dirs, files in os.walk(self.path):
1362 d = root[len(self.path) + 1:]
1362 d = root[len(self.path) + 1:]
1363 for f in files:
1363 for f in files:
1364 fl = os.path.join(d, f)
1364 fl = os.path.join(d, f)
1365 if (fl not in self.series and
1365 if (fl not in self.series and
1366 fl not in (self.status_path, self.series_path,
1366 fl not in (self.status_path, self.series_path,
1367 self.guards_path)
1367 self.guards_path)
1368 and not fl.startswith('.')):
1368 and not fl.startswith('.')):
1369 msng_list.append(fl)
1369 msng_list.append(fl)
1370 for x in util.sort(msng_list):
1370 for x in util.sort(msng_list):
1371 pfx = self.ui.verbose and ('D ') or ''
1371 pfx = self.ui.verbose and ('D ') or ''
1372 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1372 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1373
1373
1374 def issaveline(self, l):
1374 def issaveline(self, l):
1375 if l.name == '.hg.patches.save.line':
1375 if l.name == '.hg.patches.save.line':
1376 return True
1376 return True
1377
1377
1378 def qrepo(self, create=False):
1378 def qrepo(self, create=False):
1379 if create or os.path.isdir(self.join(".hg")):
1379 if create or os.path.isdir(self.join(".hg")):
1380 return hg.repository(self.ui, path=self.path, create=create)
1380 return hg.repository(self.ui, path=self.path, create=create)
1381
1381
1382 def restore(self, repo, rev, delete=None, qupdate=None):
1382 def restore(self, repo, rev, delete=None, qupdate=None):
1383 c = repo.changelog.read(rev)
1383 c = repo.changelog.read(rev)
1384 desc = c[4].strip()
1384 desc = c[4].strip()
1385 lines = desc.splitlines()
1385 lines = desc.splitlines()
1386 i = 0
1386 i = 0
1387 datastart = None
1387 datastart = None
1388 series = []
1388 series = []
1389 applied = []
1389 applied = []
1390 qpp = None
1390 qpp = None
1391 for i in xrange(0, len(lines)):
1391 for i in xrange(0, len(lines)):
1392 if lines[i] == 'Patch Data:':
1392 if lines[i] == 'Patch Data:':
1393 datastart = i + 1
1393 datastart = i + 1
1394 elif lines[i].startswith('Dirstate:'):
1394 elif lines[i].startswith('Dirstate:'):
1395 l = lines[i].rstrip()
1395 l = lines[i].rstrip()
1396 l = l[10:].split(' ')
1396 l = l[10:].split(' ')
1397 qpp = [ bin(x) for x in l ]
1397 qpp = [ bin(x) for x in l ]
1398 elif datastart != None:
1398 elif datastart != None:
1399 l = lines[i].rstrip()
1399 l = lines[i].rstrip()
1400 se = statusentry(l)
1400 se = statusentry(l)
1401 file_ = se.name
1401 file_ = se.name
1402 if se.rev:
1402 if se.rev:
1403 applied.append(se)
1403 applied.append(se)
1404 else:
1404 else:
1405 series.append(file_)
1405 series.append(file_)
1406 if datastart == None:
1406 if datastart == None:
1407 self.ui.warn(_("No saved patch data found\n"))
1407 self.ui.warn(_("No saved patch data found\n"))
1408 return 1
1408 return 1
1409 self.ui.warn(_("restoring status: %s\n") % lines[0])
1409 self.ui.warn(_("restoring status: %s\n") % lines[0])
1410 self.full_series = series
1410 self.full_series = series
1411 self.applied = applied
1411 self.applied = applied
1412 self.parse_series()
1412 self.parse_series()
1413 self.series_dirty = 1
1413 self.series_dirty = 1
1414 self.applied_dirty = 1
1414 self.applied_dirty = 1
1415 heads = repo.changelog.heads()
1415 heads = repo.changelog.heads()
1416 if delete:
1416 if delete:
1417 if rev not in heads:
1417 if rev not in heads:
1418 self.ui.warn(_("save entry has children, leaving it alone\n"))
1418 self.ui.warn(_("save entry has children, leaving it alone\n"))
1419 else:
1419 else:
1420 self.ui.warn(_("removing save entry %s\n") % short(rev))
1420 self.ui.warn(_("removing save entry %s\n") % short(rev))
1421 pp = repo.dirstate.parents()
1421 pp = repo.dirstate.parents()
1422 if rev in pp:
1422 if rev in pp:
1423 update = True
1423 update = True
1424 else:
1424 else:
1425 update = False
1425 update = False
1426 self.strip(repo, rev, update=update, backup='strip')
1426 self.strip(repo, rev, update=update, backup='strip')
1427 if qpp:
1427 if qpp:
1428 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1428 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1429 (short(qpp[0]), short(qpp[1])))
1429 (short(qpp[0]), short(qpp[1])))
1430 if qupdate:
1430 if qupdate:
1431 self.ui.status(_("queue directory updating\n"))
1431 self.ui.status(_("queue directory updating\n"))
1432 r = self.qrepo()
1432 r = self.qrepo()
1433 if not r:
1433 if not r:
1434 self.ui.warn(_("Unable to load queue repository\n"))
1434 self.ui.warn(_("Unable to load queue repository\n"))
1435 return 1
1435 return 1
1436 hg.clean(r, qpp[0])
1436 hg.clean(r, qpp[0])
1437
1437
1438 def save(self, repo, msg=None):
1438 def save(self, repo, msg=None):
1439 if len(self.applied) == 0:
1439 if len(self.applied) == 0:
1440 self.ui.warn(_("save: no patches applied, exiting\n"))
1440 self.ui.warn(_("save: no patches applied, exiting\n"))
1441 return 1
1441 return 1
1442 if self.issaveline(self.applied[-1]):
1442 if self.issaveline(self.applied[-1]):
1443 self.ui.warn(_("status is already saved\n"))
1443 self.ui.warn(_("status is already saved\n"))
1444 return 1
1444 return 1
1445
1445
1446 ar = [ ':' + x for x in self.full_series ]
1446 ar = [ ':' + x for x in self.full_series ]
1447 if not msg:
1447 if not msg:
1448 msg = _("hg patches saved state")
1448 msg = _("hg patches saved state")
1449 else:
1449 else:
1450 msg = "hg patches: " + msg.rstrip('\r\n')
1450 msg = "hg patches: " + msg.rstrip('\r\n')
1451 r = self.qrepo()
1451 r = self.qrepo()
1452 if r:
1452 if r:
1453 pp = r.dirstate.parents()
1453 pp = r.dirstate.parents()
1454 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1454 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1455 msg += "\n\nPatch Data:\n"
1455 msg += "\n\nPatch Data:\n"
1456 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1456 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1457 "\n".join(ar) + '\n' or "")
1457 "\n".join(ar) + '\n' or "")
1458 n = repo.commit(None, text, user=None, force=1)
1458 n = repo.commit(None, text, user=None, force=1)
1459 if not n:
1459 if not n:
1460 self.ui.warn(_("repo commit failed\n"))
1460 self.ui.warn(_("repo commit failed\n"))
1461 return 1
1461 return 1
1462 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1462 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1463 self.applied_dirty = 1
1463 self.applied_dirty = 1
1464 self.removeundo(repo)
1464 self.removeundo(repo)
1465
1465
1466 def full_series_end(self):
1466 def full_series_end(self):
1467 if len(self.applied) > 0:
1467 if len(self.applied) > 0:
1468 p = self.applied[-1].name
1468 p = self.applied[-1].name
1469 end = self.find_series(p)
1469 end = self.find_series(p)
1470 if end == None:
1470 if end == None:
1471 return len(self.full_series)
1471 return len(self.full_series)
1472 return end + 1
1472 return end + 1
1473 return 0
1473 return 0
1474
1474
1475 def series_end(self, all_patches=False):
1475 def series_end(self, all_patches=False):
1476 """If all_patches is False, return the index of the next pushable patch
1476 """If all_patches is False, return the index of the next pushable patch
1477 in the series, or the series length. If all_patches is True, return the
1477 in the series, or the series length. If all_patches is True, return the
1478 index of the first patch past the last applied one.
1478 index of the first patch past the last applied one.
1479 """
1479 """
1480 end = 0
1480 end = 0
1481 def next(start):
1481 def next(start):
1482 if all_patches:
1482 if all_patches:
1483 return start
1483 return start
1484 i = start
1484 i = start
1485 while i < len(self.series):
1485 while i < len(self.series):
1486 p, reason = self.pushable(i)
1486 p, reason = self.pushable(i)
1487 if p:
1487 if p:
1488 break
1488 break
1489 self.explain_pushable(i)
1489 self.explain_pushable(i)
1490 i += 1
1490 i += 1
1491 return i
1491 return i
1492 if len(self.applied) > 0:
1492 if len(self.applied) > 0:
1493 p = self.applied[-1].name
1493 p = self.applied[-1].name
1494 try:
1494 try:
1495 end = self.series.index(p)
1495 end = self.series.index(p)
1496 except ValueError:
1496 except ValueError:
1497 return 0
1497 return 0
1498 return next(end + 1)
1498 return next(end + 1)
1499 return next(end)
1499 return next(end)
1500
1500
1501 def appliedname(self, index):
1501 def appliedname(self, index):
1502 pname = self.applied[index].name
1502 pname = self.applied[index].name
1503 if not self.ui.verbose:
1503 if not self.ui.verbose:
1504 p = pname
1504 p = pname
1505 else:
1505 else:
1506 p = str(self.series.index(pname)) + " " + pname
1506 p = str(self.series.index(pname)) + " " + pname
1507 return p
1507 return p
1508
1508
1509 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1509 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1510 force=None, git=False):
1510 force=None, git=False):
1511 def checkseries(patchname):
1511 def checkseries(patchname):
1512 if patchname in self.series:
1512 if patchname in self.series:
1513 raise util.Abort(_('patch %s is already in the series file')
1513 raise util.Abort(_('patch %s is already in the series file')
1514 % patchname)
1514 % patchname)
1515 def checkfile(patchname):
1515 def checkfile(patchname):
1516 if not force and os.path.exists(self.join(patchname)):
1516 if not force and os.path.exists(self.join(patchname)):
1517 raise util.Abort(_('patch "%s" already exists')
1517 raise util.Abort(_('patch "%s" already exists')
1518 % patchname)
1518 % patchname)
1519
1519
1520 if rev:
1520 if rev:
1521 if files:
1521 if files:
1522 raise util.Abort(_('option "-r" not valid when importing '
1522 raise util.Abort(_('option "-r" not valid when importing '
1523 'files'))
1523 'files'))
1524 rev = cmdutil.revrange(repo, rev)
1524 rev = cmdutil.revrange(repo, rev)
1525 rev.sort(lambda x, y: cmp(y, x))
1525 rev.sort(lambda x, y: cmp(y, x))
1526 if (len(files) > 1 or len(rev) > 1) and patchname:
1526 if (len(files) > 1 or len(rev) > 1) and patchname:
1527 raise util.Abort(_('option "-n" not valid when importing multiple '
1527 raise util.Abort(_('option "-n" not valid when importing multiple '
1528 'patches'))
1528 'patches'))
1529 i = 0
1529 i = 0
1530 added = []
1530 added = []
1531 if rev:
1531 if rev:
1532 # If mq patches are applied, we can only import revisions
1532 # If mq patches are applied, we can only import revisions
1533 # that form a linear path to qbase.
1533 # that form a linear path to qbase.
1534 # Otherwise, they should form a linear path to a head.
1534 # Otherwise, they should form a linear path to a head.
1535 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1535 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1536 if len(heads) > 1:
1536 if len(heads) > 1:
1537 raise util.Abort(_('revision %d is the root of more than one '
1537 raise util.Abort(_('revision %d is the root of more than one '
1538 'branch') % rev[-1])
1538 'branch') % rev[-1])
1539 if self.applied:
1539 if self.applied:
1540 base = hex(repo.changelog.node(rev[0]))
1540 base = hex(repo.changelog.node(rev[0]))
1541 if base in [n.rev for n in self.applied]:
1541 if base in [n.rev for n in self.applied]:
1542 raise util.Abort(_('revision %d is already managed')
1542 raise util.Abort(_('revision %d is already managed')
1543 % rev[0])
1543 % rev[0])
1544 if heads != [bin(self.applied[-1].rev)]:
1544 if heads != [bin(self.applied[-1].rev)]:
1545 raise util.Abort(_('revision %d is not the parent of '
1545 raise util.Abort(_('revision %d is not the parent of '
1546 'the queue') % rev[0])
1546 'the queue') % rev[0])
1547 base = repo.changelog.rev(bin(self.applied[0].rev))
1547 base = repo.changelog.rev(bin(self.applied[0].rev))
1548 lastparent = repo.changelog.parentrevs(base)[0]
1548 lastparent = repo.changelog.parentrevs(base)[0]
1549 else:
1549 else:
1550 if heads != [repo.changelog.node(rev[0])]:
1550 if heads != [repo.changelog.node(rev[0])]:
1551 raise util.Abort(_('revision %d has unmanaged children')
1551 raise util.Abort(_('revision %d has unmanaged children')
1552 % rev[0])
1552 % rev[0])
1553 lastparent = None
1553 lastparent = None
1554
1554
1555 if git:
1555 if git:
1556 self.diffopts().git = True
1556 self.diffopts().git = True
1557
1557
1558 for r in rev:
1558 for r in rev:
1559 p1, p2 = repo.changelog.parentrevs(r)
1559 p1, p2 = repo.changelog.parentrevs(r)
1560 n = repo.changelog.node(r)
1560 n = repo.changelog.node(r)
1561 if p2 != nullrev:
1561 if p2 != nullrev:
1562 raise util.Abort(_('cannot import merge revision %d') % r)
1562 raise util.Abort(_('cannot import merge revision %d') % r)
1563 if lastparent and lastparent != r:
1563 if lastparent and lastparent != r:
1564 raise util.Abort(_('revision %d is not the parent of %d')
1564 raise util.Abort(_('revision %d is not the parent of %d')
1565 % (r, lastparent))
1565 % (r, lastparent))
1566 lastparent = p1
1566 lastparent = p1
1567
1567
1568 if not patchname:
1568 if not patchname:
1569 patchname = normname('%d.diff' % r)
1569 patchname = normname('%d.diff' % r)
1570 self.check_reserved_name(patchname)
1570 self.check_reserved_name(patchname)
1571 checkseries(patchname)
1571 checkseries(patchname)
1572 checkfile(patchname)
1572 checkfile(patchname)
1573 self.full_series.insert(0, patchname)
1573 self.full_series.insert(0, patchname)
1574
1574
1575 patchf = self.opener(patchname, "w")
1575 patchf = self.opener(patchname, "w")
1576 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1576 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1577 patchf.close()
1577 patchf.close()
1578
1578
1579 se = statusentry(hex(n), patchname)
1579 se = statusentry(hex(n), patchname)
1580 self.applied.insert(0, se)
1580 self.applied.insert(0, se)
1581
1581
1582 added.append(patchname)
1582 added.append(patchname)
1583 patchname = None
1583 patchname = None
1584 self.parse_series()
1584 self.parse_series()
1585 self.applied_dirty = 1
1585 self.applied_dirty = 1
1586
1586
1587 for filename in files:
1587 for filename in files:
1588 if existing:
1588 if existing:
1589 if filename == '-':
1589 if filename == '-':
1590 raise util.Abort(_('-e is incompatible with import from -'))
1590 raise util.Abort(_('-e is incompatible with import from -'))
1591 if not patchname:
1591 if not patchname:
1592 patchname = normname(filename)
1592 patchname = normname(filename)
1593 self.check_reserved_name(patchname)
1593 self.check_reserved_name(patchname)
1594 if not os.path.isfile(self.join(patchname)):
1594 if not os.path.isfile(self.join(patchname)):
1595 raise util.Abort(_("patch %s does not exist") % patchname)
1595 raise util.Abort(_("patch %s does not exist") % patchname)
1596 else:
1596 else:
1597 try:
1597 try:
1598 if filename == '-':
1598 if filename == '-':
1599 if not patchname:
1599 if not patchname:
1600 raise util.Abort(_('need --name to import a patch from -'))
1600 raise util.Abort(_('need --name to import a patch from -'))
1601 text = sys.stdin.read()
1601 text = sys.stdin.read()
1602 else:
1602 else:
1603 text = url.open(self.ui, filename).read()
1603 text = url.open(self.ui, filename).read()
1604 except (OSError, IOError):
1604 except (OSError, IOError):
1605 raise util.Abort(_("unable to read %s") % filename)
1605 raise util.Abort(_("unable to read %s") % filename)
1606 if not patchname:
1606 if not patchname:
1607 patchname = normname(os.path.basename(filename))
1607 patchname = normname(os.path.basename(filename))
1608 self.check_reserved_name(patchname)
1608 self.check_reserved_name(patchname)
1609 checkfile(patchname)
1609 checkfile(patchname)
1610 patchf = self.opener(patchname, "w")
1610 patchf = self.opener(patchname, "w")
1611 patchf.write(text)
1611 patchf.write(text)
1612 if not force:
1612 if not force:
1613 checkseries(patchname)
1613 checkseries(patchname)
1614 if patchname not in self.series:
1614 if patchname not in self.series:
1615 index = self.full_series_end() + i
1615 index = self.full_series_end() + i
1616 self.full_series[index:index] = [patchname]
1616 self.full_series[index:index] = [patchname]
1617 self.parse_series()
1617 self.parse_series()
1618 self.ui.warn(_("adding %s to series file\n") % patchname)
1618 self.ui.warn(_("adding %s to series file\n") % patchname)
1619 i += 1
1619 i += 1
1620 added.append(patchname)
1620 added.append(patchname)
1621 patchname = None
1621 patchname = None
1622 self.series_dirty = 1
1622 self.series_dirty = 1
1623 qrepo = self.qrepo()
1623 qrepo = self.qrepo()
1624 if qrepo:
1624 if qrepo:
1625 qrepo.add(added)
1625 qrepo.add(added)
1626
1626
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to the
    -r/--rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The qfinish command should
    be used as an alternative for qdelete -r, as the latter option is
    deprecated.

    With -k/--keep, the patch files are preserved in the patch
    directory."""
    # Delegate to the queue object attached to the repository, then
    # persist the updated series/status files to disk.
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1644
1644
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        # No patch named: list everything up to the last applied patch.
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1655
1655
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # Start listing just after the named patch.
        start = q.series.index(patch) + 1
    else:
        # No patch named: start after the last applied patch.
        start = q.series_end(True)
    # Propagate qseries' result so the command exit status behaves the
    # same way as applied() above (which returns it).
    return q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1666
1666
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    # The heavy lifting lives in queue.qimport; this wrapper only maps
    # command-line options onto its keyword arguments.
    mq = repo.mq
    mq.qimport(repo, filename,
               patchname=opts['name'],
               existing=opts['existing'],
               force=opts['force'],
               rev=opts['rev'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1700
1700
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository."""
    q = repo.mq
    qrepo = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not qrepo:
        # Unversioned queue: nothing more to set up.
        return 0
    # Seed the nested patch repository with a suitable .hgignore and an
    # empty series file, then schedule both for addition.
    if not os.path.exists(qrepo.wjoin('.hgignore')):
        fp = qrepo.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(qrepo.wjoin('series')):
        qrepo.wopener('series', 'w').close()
    qrepo.add(['.hgignore', 'series'])
    commands.add(ui, qrepo)
    return 0
1726
1726
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Default location of the nested patch repository for a repo URL.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    # -p/--patches overrides the default <src>/.hg/patches location.
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # Fail early if the source has no versioned patch repository at all.
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = first applied changeset; everything from it up is
            # mq-managed and must not end up applied in the destination.
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Remote destination: we cannot strip there afterwards, so
                # restrict the clone to heads that do not descend from
                # qbase, plus qbase's parent.
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # Remote source: best-effort lookup of the qbase tag; absence just
        # means no patches are applied (or the tag is unknown).
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Local destination: remove the mq-managed changesets so the
            # clone starts with no patches applied.
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1793
1793
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # Wrap the message in _() for translation, consistent with every
        # other abort message in this module.
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1800
1800
def series(ui, repo, **opts):
    """print the entire series file"""
    # qseries does all of the formatting; just forward the options.
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1805
1805
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index just past the topmost applied patch; 0 when nothing applied.
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t-1, length=1, status='A',
                     summary=opts.get('summary'))
1816
1816
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    end = q.series_end()
    if end != len(q.series):
        # There is at least one unapplied patch left; show the first one.
        return q.qseries(repo, start=end, length=1,
                         summary=opts.get('summary'))
    ui.write(_("all patches applied\n"))
    return 1
1825
1825
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    applied_count = len(q.applied)
    # Need at least two applied patches to have a "previous" one.
    if not applied_count:
        ui.write(_("no patches applied\n"))
        return 1
    if applied_count == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    return q.qseries(repo, start=applied_count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1838
1838
def setupheaderopts(ui, opts):
    # Fill in user/date from the current environment when the matching
    # --currentuser/--currentdate flag was given and no explicit value was.
    def fill(key, value):
        if opts['current' + key] and not opts[key]:
            opts[key] = value
    fill('user', ui.username())
    fill('date', "%d %d" % util.makedate())
1845
1845
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        return ui.edit(msg, ui.username())
    q = repo.mq
    # Note: the original code also assigned opts['msg'] = msg before this
    # branch; that was a dead store immediately overwritten here.
    if opts.get('edit'):
        # With -e/--edit the message is computed lazily, once q.new
        # actually invokes the editor callback.
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    # Fill in user/date from --currentuser/--currentdate if requested.
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1882
1882
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        # -e/--edit opens the topmost patch's existing header in an
        # editor; it is mutually exclusive with -m/-l, which would also
        # supply a message.
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patch = q.applied[-1].name
        ph = q.readheaders(patch)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    # Fill in user/date from --currentuser/--currentdate if requested.
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1913
1913
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    # All the work happens in the queue object; exit status is always 0.
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1929
1929
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: terminate the warning with a newline so it does not run
            # into subsequent output (every other warn/write here does).
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's header message for concatenation.
            ph = q.readheaders(p)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Join the current patch's header with the folded headers,
        # separated by '* * *' lines.
        ph = q.readheaders(parent)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1991
1991
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts['force']
    # Pop if the target is already applied (it is below the top), push
    # otherwise.
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force)
    else:
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
2002
2002
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
      hg qguard -- other.patch +2.6.17 -stable
    '''
    def status(idx):
        # Print "patchname: guard guard ..." for series entry idx.
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints guards for every patch and accepts no other
        # arguments or options.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # First argument is a guard (starts with '-'/'+') or absent:
        # default to the topmost applied patch.
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument names the patch; remaining args are guards.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Set mode: replace the patch's guards (--none clears them).
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # Query mode: print the guards of the selected patch.
        status(q.series.index(q.lookup(patch)))
2047
2047
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    # Default to the topmost applied patch when none is named.
    if not patch:
        if not q.applied:
            ui.write('no patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)

    ph = q.readheaders(patch)
    ui.write('\n'.join(ph.message) + '\n')
2062
2062
2063 def lastsavename(path):
2063 def lastsavename(path):
2064 (directory, base) = os.path.split(path)
2064 (directory, base) = os.path.split(path)
2065 names = os.listdir(directory)
2065 names = os.listdir(directory)
2066 namere = re.compile("%s.([0-9]+)" % base)
2066 namere = re.compile("%s.([0-9]+)" % base)
2067 maxindex = None
2067 maxindex = None
2068 maxname = None
2068 maxname = None
2069 for f in names:
2069 for f in names:
2070 m = namere.match(f)
2070 m = namere.match(f)
2071 if m:
2071 if m:
2072 index = int(m.group(1))
2072 index = int(m.group(1))
2073 if maxindex == None or index > maxindex:
2073 if maxindex == None or index > maxindex:
2074 maxindex = index
2074 maxindex = index
2075 maxname = f
2075 maxname = f
2076 if maxname:
2076 if maxname:
2077 return (os.path.join(directory, maxname), maxindex)
2077 return (os.path.join(directory, maxname), maxindex)
2078 return (None, None)
2078 return (None, None)
2079
2079
def savename(path):
    """Return the name for the next queue save: '<path>.<N+1>'."""
    previous, index = lastsavename(path)
    # No earlier save: start numbering from 1.
    if previous is None:
        index = 0
    return path + ".%d" % (index + 1)
2086
2086
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # --merge: pull patches from a saved queue (named with -n, or
        # the most recent save otherwise).
        if opts['name']:
            saved = repo.join(opts['name'])
        else:
            saved, i = lastsavename(q.path)
            if not saved:
                ui.warn(_("no saved queues found, please use -n\n"))
                return 1
        mergeq = queue(ui, repo.join(""), saved)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)

    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2109
2109
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    # -n/--name pops from a named (saved) queue instead of the active
    # one; in that case the working directory is left untouched.
    if opts['name']:
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True

    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2128
2128
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # One argument: it is the destination name; the source defaults to
    # the current (topmost applied) patch, resolved below.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into an existing directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # Rewrite the series entry, preserving any '#guard' annotations.
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is currently applied, update its status entry too.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # The patch directory is itself a repository (qinit -c): mirror
        # the rename there so history follows the file.
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # Patch was added but never committed: just swap the add.
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                # Record as a copy + removal so the rename is tracked.
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2188
2188
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision"""
    node = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, node, delete=opts['delete'], qupdate=opts['update'])
    q.save_dirty()
    return 0
2197
2197
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c/--copy: snapshot the patch directory, either under an
        # explicit -n/--name or under the next '<path>.<N>' save name.
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Best-effort removal: the status file may simply not exist.
            # (Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass
    return 0
2227
2227
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped
    revision.
    """
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # Only update the working directory when the stripped revision is
    # an ancestor of (or equal to) a working-directory parent.
    if p[0] == nullid:
        update = False
    elif p[1] == nullid:
        update = rev == cl.ancestor(p[0], rev)
    else:
        update = rev in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev))

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2254
2254
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # Set mode: activate the named guards (or none). Record the
        # before/after counts so we can tell the user what changed.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # Without --pop/--reapply we don't touch the stack, so just
            # report how the guard change affected push/pop-ability.
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: tally how many patches carry each guard.
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # Sort by guard name, ignoring the leading '+'/'-' sign
        # (Python 2 cmp-style comparator).
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # Query mode: print the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # Remember the current top patch so --reapply can push back to it.
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # Pop down to just below the first applied patch that is now
        # guarded (pop everything if the very first one is guarded).
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # Persist the queue state even if the push fails midway.
            q.save_dirty()
2357
2357
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    if opts['applied']:
        # -a/--applied: finish every currently applied patch.
        revrange = ('qbase:qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    q.finish(repo, cmdutil.revrange(repo, revrange))
    q.save_dirty()
    return 0
2388
2388
def reposetup(ui, repo):
    # Wrap the repository class so core operations become mq-aware.
    class mqrepo(repo.__class__):
        def abort_if_wdir_patched(self, errmsg, force=False):
            # Abort when the first working-directory parent is an
            # applied mq patch (unless force is set).
            if self.mq.applied and not force:
                parent = hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # 'force' may arrive positionally (6th argument) or as a
            # keyword; support both calling conventions.
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            # Refuse to push mq-managed changesets to another repository
            # unless forced or an explicit revision set is given.
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            # Augment the regular tags with synthetic mq tags (one per
            # applied patch, plus qtip/qbase/qparent).
            if self.tagscache:
                return self.tagscache

            # NOTE(review): presumably super().tags() populates
            # self.tagscache as a side effect -- not visible here.
            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]

            # A stale status file can reference nodes that were stripped;
            # fall back to the plain tags in that case.
            if mqtags[-1][0] not in self.changelog.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return tagscache

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            for patch in mqtags:
                # Real tags win over same-named mq patches.
                if patch[1] in tagscache:
                    self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
                                 % patch[1])
                else:
                    tagscache[patch[1]] = patch[0]

            return tagscache

        def _branchtags(self, partial, lrev):
            # Keep mq patches out of the persisted branch cache: only
            # cache up to (but excluding) the first applied patch.
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = bin(q.applied[0].rev)
            if qbasenode not in cl.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, len(cl))

            return partial

    if repo.local():
        # Only local repositories get the mq machinery attached.
        repo.__class__ = mqrepo
        repo.mq = queue(ui, repo.join(""))
2473
2473
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrapper for 'hg import': refuse to import over an applied mq patch."""
    # The mq check only exists on repositories that reposetup() wrapped.
    check = getattr(repo, 'abort_if_wdir_patched', None)
    if check is not None:
        check(_('cannot import over an applied patch'), kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
2479
2479
def uisetup(ui):
    # Hook the core 'import' command so it aborts over applied mq patches.
    extensions.wrapcommand(commands.table, 'import', mqimport)
2482
2482
# Option shared by the patch-listing commands (-s/--summary).
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2484
2484
2485 cmdtable = {
2485 cmdtable = {
2486 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2486 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2487 "qclone":
2487 "qclone":
2488 (clone,
2488 (clone,
2489 [('', 'pull', None, _('use pull protocol to copy metadata')),
2489 [('', 'pull', None, _('use pull protocol to copy metadata')),
2490 ('U', 'noupdate', None, _('do not update the new working directories')),
2490 ('U', 'noupdate', None, _('do not update the new working directories')),
2491 ('', 'uncompressed', None,
2491 ('', 'uncompressed', None,
2492 _('use uncompressed transfer (fast over LAN)')),
2492 _('use uncompressed transfer (fast over LAN)')),
2493 ('p', 'patches', '', _('location of source patch repository')),
2493 ('p', 'patches', '', _('location of source patch repository')),
2494 ] + commands.remoteopts,
2494 ] + commands.remoteopts,
2495 _('hg qclone [OPTION]... SOURCE [DEST]')),
2495 _('hg qclone [OPTION]... SOURCE [DEST]')),
2496 "qcommit|qci":
2496 "qcommit|qci":
2497 (commit,
2497 (commit,
2498 commands.table["^commit|ci"][1],
2498 commands.table["^commit|ci"][1],
2499 _('hg qcommit [OPTION]... [FILE]...')),
2499 _('hg qcommit [OPTION]... [FILE]...')),
2500 "^qdiff":
2500 "^qdiff":
2501 (diff,
2501 (diff,
2502 commands.diffopts + commands.diffopts2 + commands.walkopts,
2502 commands.diffopts + commands.diffopts2 + commands.walkopts,
2503 _('hg qdiff [OPTION]... [FILE]...')),
2503 _('hg qdiff [OPTION]... [FILE]...')),
2504 "qdelete|qremove|qrm":
2504 "qdelete|qremove|qrm":
2505 (delete,
2505 (delete,
2506 [('k', 'keep', None, _('keep patch file')),
2506 [('k', 'keep', None, _('keep patch file')),
2507 ('r', 'rev', [], _('stop managing a revision'))],
2507 ('r', 'rev', [], _('stop managing a revision'))],
2508 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2508 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2509 'qfold':
2509 'qfold':
2510 (fold,
2510 (fold,
2511 [('e', 'edit', None, _('edit patch header')),
2511 [('e', 'edit', None, _('edit patch header')),
2512 ('k', 'keep', None, _('keep folded patch files')),
2512 ('k', 'keep', None, _('keep folded patch files')),
2513 ] + commands.commitopts,
2513 ] + commands.commitopts,
2514 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2514 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2515 'qgoto':
2515 'qgoto':
2516 (goto,
2516 (goto,
2517 [('f', 'force', None, _('overwrite any local changes'))],
2517 [('f', 'force', None, _('overwrite any local changes'))],
2518 _('hg qgoto [OPTION]... PATCH')),
2518 _('hg qgoto [OPTION]... PATCH')),
2519 'qguard':
2519 'qguard':
2520 (guard,
2520 (guard,
2521 [('l', 'list', None, _('list all patches and guards')),
2521 [('l', 'list', None, _('list all patches and guards')),
2522 ('n', 'none', None, _('drop all guards'))],
2522 ('n', 'none', None, _('drop all guards'))],
2523 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2523 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2524 'qheader': (header, [], _('hg qheader [PATCH]')),
2524 'qheader': (header, [], _('hg qheader [PATCH]')),
2525 "^qimport":
2525 "^qimport":
2526 (qimport,
2526 (qimport,
2527 [('e', 'existing', None, _('import file in patch directory')),
2527 [('e', 'existing', None, _('import file in patch directory')),
2528 ('n', 'name', '', _('patch file name')),
2528 ('n', 'name', '', _('patch file name')),
2529 ('f', 'force', None, _('overwrite existing files')),
2529 ('f', 'force', None, _('overwrite existing files')),
2530 ('r', 'rev', [], _('place existing revisions under mq control')),
2530 ('r', 'rev', [], _('place existing revisions under mq control')),
2531 ('g', 'git', None, _('use git extended diff format'))],
2531 ('g', 'git', None, _('use git extended diff format'))],
2532 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2532 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2533 "^qinit":
2533 "^qinit":
2534 (init,
2534 (init,
2535 [('c', 'create-repo', None, _('create queue repository'))],
2535 [('c', 'create-repo', None, _('create queue repository'))],
2536 _('hg qinit [-c]')),
2536 _('hg qinit [-c]')),
2537 "qnew":
2537 "qnew":
2538 (new,
2538 (new,
2539 [('e', 'edit', None, _('edit commit message')),
2539 [('e', 'edit', None, _('edit commit message')),
2540 ('f', 'force', None, _('import uncommitted changes into patch')),
2540 ('f', 'force', None, _('import uncommitted changes into patch')),
2541 ('g', 'git', None, _('use git extended diff format')),
2541 ('g', 'git', None, _('use git extended diff format')),
2542 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2542 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2543 ('u', 'user', '', _('add "From: <given user>" to patch')),
2543 ('u', 'user', '', _('add "From: <given user>" to patch')),
2544 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2544 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2545 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2545 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2546 ] + commands.walkopts + commands.commitopts,
2546 ] + commands.walkopts + commands.commitopts,
2547 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2547 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2548 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2548 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2549 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2549 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2550 "^qpop":
2550 "^qpop":
2551 (pop,
2551 (pop,
2552 [('a', 'all', None, _('pop all patches')),
2552 [('a', 'all', None, _('pop all patches')),
2553 ('n', 'name', '', _('queue name to pop')),
2553 ('n', 'name', '', _('queue name to pop')),
2554 ('f', 'force', None, _('forget any local changes'))],
2554 ('f', 'force', None, _('forget any local changes'))],
2555 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2555 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2556 "^qpush":
2556 "^qpush":
2557 (push,
2557 (push,
2558 [('f', 'force', None, _('apply if the patch has rejects')),
2558 [('f', 'force', None, _('apply if the patch has rejects')),
2559 ('l', 'list', None, _('list patch name in commit text')),
2559 ('l', 'list', None, _('list patch name in commit text')),
2560 ('a', 'all', None, _('apply all patches')),
2560 ('a', 'all', None, _('apply all patches')),
2561 ('m', 'merge', None, _('merge from another queue')),
2561 ('m', 'merge', None, _('merge from another queue')),
2562 ('n', 'name', '', _('merge queue name'))],
2562 ('n', 'name', '', _('merge queue name'))],
2563 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2563 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2564 "^qrefresh":
2564 "^qrefresh":
2565 (refresh,
2565 (refresh,
2566 [('e', 'edit', None, _('edit commit message')),
2566 [('e', 'edit', None, _('edit commit message')),
2567 ('g', 'git', None, _('use git extended diff format')),
2567 ('g', 'git', None, _('use git extended diff format')),
2568 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2568 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2569 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2569 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2570 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2570 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2571 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2571 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2572 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2572 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2573 ] + commands.walkopts + commands.commitopts,
2573 ] + commands.walkopts + commands.commitopts,
2574 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2574 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2575 'qrename|qmv':
2575 'qrename|qmv':
2576 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2576 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2577 "qrestore":
2577 "qrestore":
2578 (restore,
2578 (restore,
2579 [('d', 'delete', None, _('delete save entry')),
2579 [('d', 'delete', None, _('delete save entry')),
2580 ('u', 'update', None, _('update queue working directory'))],
2580 ('u', 'update', None, _('update queue working directory'))],
2581 _('hg qrestore [-d] [-u] REV')),
2581 _('hg qrestore [-d] [-u] REV')),
2582 "qsave":
2582 "qsave":
2583 (save,
2583 (save,
2584 [('c', 'copy', None, _('copy patch directory')),
2584 [('c', 'copy', None, _('copy patch directory')),
2585 ('n', 'name', '', _('copy directory name')),
2585 ('n', 'name', '', _('copy directory name')),
2586 ('e', 'empty', None, _('clear queue status file')),
2586 ('e', 'empty', None, _('clear queue status file')),
2587 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2587 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2588 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2588 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2589 "qselect":
2589 "qselect":
2590 (select,
2590 (select,
2591 [('n', 'none', None, _('disable all guards')),
2591 [('n', 'none', None, _('disable all guards')),
2592 ('s', 'series', None, _('list all guards in series file')),
2592 ('s', 'series', None, _('list all guards in series file')),
2593 ('', 'pop', None, _('pop to before first guarded applied patch')),
2593 ('', 'pop', None, _('pop to before first guarded applied patch')),
2594 ('', 'reapply', None, _('pop, then reapply patches'))],
2594 ('', 'reapply', None, _('pop, then reapply patches'))],
2595 _('hg qselect [OPTION]... [GUARD]...')),
2595 _('hg qselect [OPTION]... [GUARD]...')),
2596 "qseries":
2596 "qseries":
2597 (series,
2597 (series,
2598 [('m', 'missing', None, _('print patches not in series')),
2598 [('m', 'missing', None, _('print patches not in series')),
2599 ] + seriesopts,
2599 ] + seriesopts,
2600 _('hg qseries [-ms]')),
2600 _('hg qseries [-ms]')),
2601 "^strip":
2601 "^strip":
2602 (strip,
2602 (strip,
2603 [('f', 'force', None, _('force removal with local changes')),
2603 [('f', 'force', None, _('force removal with local changes')),
2604 ('b', 'backup', None, _('bundle unrelated changesets')),
2604 ('b', 'backup', None, _('bundle unrelated changesets')),
2605 ('n', 'nobackup', None, _('no backups'))],
2605 ('n', 'nobackup', None, _('no backups'))],
2606 _('hg strip [-f] [-b] [-n] REV')),
2606 _('hg strip [-f] [-b] [-n] REV')),
2607 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2607 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2608 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2608 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2609 "qfinish":
2609 "qfinish":
2610 (finish,
2610 (finish,
2611 [('a', 'applied', None, _('finish all applied changesets'))],
2611 [('a', 'applied', None, _('finish all applied changesets'))],
2612 _('hg qfinish [-a] [REV...]')),
2612 _('hg qfinish [-a] [REV...]')),
2613 }
2613 }
@@ -1,806 +1,806 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util, os, errno
10 import ancestor, bdiff, error, util, os, errno
11
11
12 class propertycache(object):
12 class propertycache(object):
13 def __init__(self, func):
13 def __init__(self, func):
14 self.func = func
14 self.func = func
15 self.name = func.__name__
15 self.name = func.__name__
16 def __get__(self, obj, type=None):
16 def __get__(self, obj, type=None):
17 result = self.func(obj)
17 result = self.func(obj)
18 setattr(obj, self.name, result)
18 setattr(obj, self.name, result)
19 return result
19 return result
20
20
21 class changectx(object):
21 class changectx(object):
22 """A changecontext object makes access to data related to a particular
22 """A changecontext object makes access to data related to a particular
23 changeset convenient."""
23 changeset convenient."""
24 def __init__(self, repo, changeid=''):
24 def __init__(self, repo, changeid=''):
25 """changeid is a revision number, node, or tag"""
25 """changeid is a revision number, node, or tag"""
26 if changeid == '':
26 if changeid == '':
27 changeid = '.'
27 changeid = '.'
28 self._repo = repo
28 self._repo = repo
29 if isinstance(changeid, (long, int)):
29 if isinstance(changeid, (long, int)):
30 self._rev = changeid
30 self._rev = changeid
31 self._node = self._repo.changelog.node(changeid)
31 self._node = self._repo.changelog.node(changeid)
32 else:
32 else:
33 self._node = self._repo.lookup(changeid)
33 self._node = self._repo.lookup(changeid)
34 self._rev = self._repo.changelog.rev(self._node)
34 self._rev = self._repo.changelog.rev(self._node)
35
35
36 def __str__(self):
36 def __str__(self):
37 return short(self.node())
37 return short(self.node())
38
38
39 def __int__(self):
39 def __int__(self):
40 return self.rev()
40 return self.rev()
41
41
42 def __repr__(self):
42 def __repr__(self):
43 return "<changectx %s>" % str(self)
43 return "<changectx %s>" % str(self)
44
44
45 def __hash__(self):
45 def __hash__(self):
46 try:
46 try:
47 return hash(self._rev)
47 return hash(self._rev)
48 except AttributeError:
48 except AttributeError:
49 return id(self)
49 return id(self)
50
50
51 def __eq__(self, other):
51 def __eq__(self, other):
52 try:
52 try:
53 return self._rev == other._rev
53 return self._rev == other._rev
54 except AttributeError:
54 except AttributeError:
55 return False
55 return False
56
56
57 def __ne__(self, other):
57 def __ne__(self, other):
58 return not (self == other)
58 return not (self == other)
59
59
60 def __nonzero__(self):
60 def __nonzero__(self):
61 return self._rev != nullrev
61 return self._rev != nullrev
62
62
63 def _changeset(self):
63 def _changeset(self):
64 return self._repo.changelog.read(self.node())
64 return self._repo.changelog.read(self.node())
65 _changeset = propertycache(_changeset)
65 _changeset = propertycache(_changeset)
66
66
67 def _manifest(self):
67 def _manifest(self):
68 return self._repo.manifest.read(self._changeset[0])
68 return self._repo.manifest.read(self._changeset[0])
69 _manifest = propertycache(_manifest)
69 _manifest = propertycache(_manifest)
70
70
71 def _manifestdelta(self):
71 def _manifestdelta(self):
72 return self._repo.manifest.readdelta(self._changeset[0])
72 return self._repo.manifest.readdelta(self._changeset[0])
73 _manifestdelta = propertycache(_manifestdelta)
73 _manifestdelta = propertycache(_manifestdelta)
74
74
75 def _parents(self):
75 def _parents(self):
76 p = self._repo.changelog.parentrevs(self._rev)
76 p = self._repo.changelog.parentrevs(self._rev)
77 if p[1] == nullrev:
77 if p[1] == nullrev:
78 p = p[:-1]
78 p = p[:-1]
79 return [changectx(self._repo, x) for x in p]
79 return [changectx(self._repo, x) for x in p]
80 _parents = propertycache(_parents)
80 _parents = propertycache(_parents)
81
81
82 def __contains__(self, key):
82 def __contains__(self, key):
83 return key in self._manifest
83 return key in self._manifest
84
84
85 def __getitem__(self, key):
85 def __getitem__(self, key):
86 return self.filectx(key)
86 return self.filectx(key)
87
87
88 def __iter__(self):
88 def __iter__(self):
89 for f in util.sort(self._manifest):
89 for f in util.sort(self._manifest):
90 yield f
90 yield f
91
91
92 def changeset(self): return self._changeset
92 def changeset(self): return self._changeset
93 def manifest(self): return self._manifest
93 def manifest(self): return self._manifest
94
94
95 def rev(self): return self._rev
95 def rev(self): return self._rev
96 def node(self): return self._node
96 def node(self): return self._node
97 def hex(self): return hex(self._node)
97 def hex(self): return hex(self._node)
98 def user(self): return self._changeset[1]
98 def user(self): return self._changeset[1]
99 def date(self): return self._changeset[2]
99 def date(self): return self._changeset[2]
100 def files(self): return self._changeset[3]
100 def files(self): return self._changeset[3]
101 def description(self): return self._changeset[4]
101 def description(self): return self._changeset[4]
102 def branch(self): return self._changeset[5].get("branch")
102 def branch(self): return self._changeset[5].get("branch")
103 def extra(self): return self._changeset[5]
103 def extra(self): return self._changeset[5]
104 def tags(self): return self._repo.nodetags(self._node)
104 def tags(self): return self._repo.nodetags(self._node)
105
105
106 def parents(self):
106 def parents(self):
107 """return contexts for each parent changeset"""
107 """return contexts for each parent changeset"""
108 return self._parents
108 return self._parents
109
109
110 def children(self):
110 def children(self):
111 """return contexts for each child changeset"""
111 """return contexts for each child changeset"""
112 c = self._repo.changelog.children(self._node)
112 c = self._repo.changelog.children(self._node)
113 return [changectx(self._repo, x) for x in c]
113 return [changectx(self._repo, x) for x in c]
114
114
115 def ancestors(self):
115 def ancestors(self):
116 for a in self._repo.changelog.ancestors(self._rev):
116 for a in self._repo.changelog.ancestors(self._rev):
117 yield changectx(self._repo, a)
117 yield changectx(self._repo, a)
118
118
119 def descendants(self):
119 def descendants(self):
120 for d in self._repo.changelog.descendants(self._rev):
120 for d in self._repo.changelog.descendants(self._rev):
121 yield changectx(self._repo, d)
121 yield changectx(self._repo, d)
122
122
123 def _fileinfo(self, path):
123 def _fileinfo(self, path):
124 if '_manifest' in self.__dict__:
124 if '_manifest' in self.__dict__:
125 try:
125 try:
126 return self._manifest[path], self._manifest.flags(path)
126 return self._manifest[path], self._manifest.flags(path)
127 except KeyError:
127 except KeyError:
128 raise error.LookupError(self._node, path,
128 raise error.LookupError(self._node, path,
129 _('not found in manifest'))
129 _('not found in manifest'))
130 if '_manifestdelta' in self.__dict__ or path in self.files():
130 if '_manifestdelta' in self.__dict__ or path in self.files():
131 if path in self._manifestdelta:
131 if path in self._manifestdelta:
132 return self._manifestdelta[path], self._manifestdelta.flags(path)
132 return self._manifestdelta[path], self._manifestdelta.flags(path)
133 node, flag = self._repo.manifest.find(self._changeset[0], path)
133 node, flag = self._repo.manifest.find(self._changeset[0], path)
134 if not node:
134 if not node:
135 raise error.LookupError(self._node, path,
135 raise error.LookupError(self._node, path,
136 _('not found in manifest'))
136 _('not found in manifest'))
137
137
138 return node, flag
138 return node, flag
139
139
140 def filenode(self, path):
140 def filenode(self, path):
141 return self._fileinfo(path)[0]
141 return self._fileinfo(path)[0]
142
142
143 def flags(self, path):
143 def flags(self, path):
144 try:
144 try:
145 return self._fileinfo(path)[1]
145 return self._fileinfo(path)[1]
146 except error.LookupError:
146 except error.LookupError:
147 return ''
147 return ''
148
148
149 def filectx(self, path, fileid=None, filelog=None):
149 def filectx(self, path, fileid=None, filelog=None):
150 """get a file context from this changeset"""
150 """get a file context from this changeset"""
151 if fileid is None:
151 if fileid is None:
152 fileid = self.filenode(path)
152 fileid = self.filenode(path)
153 return filectx(self._repo, path, fileid=fileid,
153 return filectx(self._repo, path, fileid=fileid,
154 changectx=self, filelog=filelog)
154 changectx=self, filelog=filelog)
155
155
156 def ancestor(self, c2):
156 def ancestor(self, c2):
157 """
157 """
158 return the ancestor context of self and c2
158 return the ancestor context of self and c2
159 """
159 """
160 n = self._repo.changelog.ancestor(self._node, c2._node)
160 n = self._repo.changelog.ancestor(self._node, c2._node)
161 return changectx(self._repo, n)
161 return changectx(self._repo, n)
162
162
163 def walk(self, match):
163 def walk(self, match):
164 fdict = dict.fromkeys(match.files())
164 fdict = dict.fromkeys(match.files())
165 # for dirstate.walk, files=['.'] means "walk the whole tree".
165 # for dirstate.walk, files=['.'] means "walk the whole tree".
166 # follow that here, too
166 # follow that here, too
167 fdict.pop('.', None)
167 fdict.pop('.', None)
168 for fn in self:
168 for fn in self:
169 for ffn in fdict:
169 for ffn in fdict:
170 # match if the file is the exact name or a directory
170 # match if the file is the exact name or a directory
171 if ffn == fn or fn.startswith("%s/" % ffn):
171 if ffn == fn or fn.startswith("%s/" % ffn):
172 del fdict[ffn]
172 del fdict[ffn]
173 break
173 break
174 if match(fn):
174 if match(fn):
175 yield fn
175 yield fn
176 for fn in util.sort(fdict):
176 for fn in util.sort(fdict):
177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
178 yield fn
178 yield fn
179
179
180 class filectx(object):
180 class filectx(object):
181 """A filecontext object makes access to data related to a particular
181 """A filecontext object makes access to data related to a particular
182 filerevision convenient."""
182 filerevision convenient."""
183 def __init__(self, repo, path, changeid=None, fileid=None,
183 def __init__(self, repo, path, changeid=None, fileid=None,
184 filelog=None, changectx=None):
184 filelog=None, changectx=None):
185 """changeid can be a changeset revision, node, or tag.
185 """changeid can be a changeset revision, node, or tag.
186 fileid can be a file revision or node."""
186 fileid can be a file revision or node."""
187 self._repo = repo
187 self._repo = repo
188 self._path = path
188 self._path = path
189
189
190 assert (changeid is not None
190 assert (changeid is not None
191 or fileid is not None
191 or fileid is not None
192 or changectx is not None)
192 or changectx is not None)
193
193
194 if filelog:
194 if filelog:
195 self._filelog = filelog
195 self._filelog = filelog
196
196
197 if changeid is not None:
197 if changeid is not None:
198 self._changeid = changeid
198 self._changeid = changeid
199 if changectx is not None:
199 if changectx is not None:
200 self._changectx = changectx
200 self._changectx = changectx
201 if fileid is not None:
201 if fileid is not None:
202 self._fileid = fileid
202 self._fileid = fileid
203
203
204 def _changectx(self):
204 def _changectx(self):
205 return changectx(self._repo, self._changeid)
205 return changectx(self._repo, self._changeid)
206 _changectx = propertycache(_changectx)
206 _changectx = propertycache(_changectx)
207
207
208 def _filelog(self):
208 def _filelog(self):
209 return self._repo.file(self._path)
209 return self._repo.file(self._path)
210 _filelog = propertycache(_filelog)
210 _filelog = propertycache(_filelog)
211
211
212 def _changeid(self):
212 def _changeid(self):
213 if '_changectx' in self.__dict__:
213 if '_changectx' in self.__dict__:
214 return self._changectx.rev()
214 return self._changectx.rev()
215 else:
215 else:
216 return self._filelog.linkrev(self._filerev)
216 return self._filelog.linkrev(self._filerev)
217 _changeid = propertycache(_changeid)
217 _changeid = propertycache(_changeid)
218
218
219 def _filenode(self):
219 def _filenode(self):
220 if '_fileid' in self.__dict__:
220 if '_fileid' in self.__dict__:
221 return self._filelog.lookup(self._fileid)
221 return self._filelog.lookup(self._fileid)
222 else:
222 else:
223 return self._changectx.filenode(self._path)
223 return self._changectx.filenode(self._path)
224 _filenode = propertycache(_filenode)
224 _filenode = propertycache(_filenode)
225
225
226 def _filerev(self):
226 def _filerev(self):
227 return self._filelog.rev(self._filenode)
227 return self._filelog.rev(self._filenode)
228 _filerev = propertycache(_filerev)
228 _filerev = propertycache(_filerev)
229
229
230 def _repopath(self):
230 def _repopath(self):
231 return self._path
231 return self._path
232 _repopath = propertycache(_repopath)
232 _repopath = propertycache(_repopath)
233
233
234 def __nonzero__(self):
234 def __nonzero__(self):
235 try:
235 try:
236 self._filenode
236 self._filenode
237 return True
237 return True
238 except error.LookupError:
238 except error.LookupError:
239 # file is missing
239 # file is missing
240 return False
240 return False
241
241
242 def __str__(self):
242 def __str__(self):
243 return "%s@%s" % (self.path(), short(self.node()))
243 return "%s@%s" % (self.path(), short(self.node()))
244
244
245 def __repr__(self):
245 def __repr__(self):
246 return "<filectx %s>" % str(self)
246 return "<filectx %s>" % str(self)
247
247
248 def __hash__(self):
248 def __hash__(self):
249 try:
249 try:
250 return hash((self._path, self._fileid))
250 return hash((self._path, self._fileid))
251 except AttributeError:
251 except AttributeError:
252 return id(self)
252 return id(self)
253
253
254 def __eq__(self, other):
254 def __eq__(self, other):
255 try:
255 try:
256 return (self._path == other._path
256 return (self._path == other._path
257 and self._fileid == other._fileid)
257 and self._fileid == other._fileid)
258 except AttributeError:
258 except AttributeError:
259 return False
259 return False
260
260
261 def __ne__(self, other):
261 def __ne__(self, other):
262 return not (self == other)
262 return not (self == other)
263
263
264 def filectx(self, fileid):
264 def filectx(self, fileid):
265 '''opens an arbitrary revision of the file without
265 '''opens an arbitrary revision of the file without
266 opening a new filelog'''
266 opening a new filelog'''
267 return filectx(self._repo, self._path, fileid=fileid,
267 return filectx(self._repo, self._path, fileid=fileid,
268 filelog=self._filelog)
268 filelog=self._filelog)
269
269
270 def filerev(self): return self._filerev
270 def filerev(self): return self._filerev
271 def filenode(self): return self._filenode
271 def filenode(self): return self._filenode
272 def flags(self): return self._changectx.flags(self._path)
272 def flags(self): return self._changectx.flags(self._path)
273 def filelog(self): return self._filelog
273 def filelog(self): return self._filelog
274
274
275 def rev(self):
275 def rev(self):
276 if '_changectx' in self.__dict__:
276 if '_changectx' in self.__dict__:
277 return self._changectx.rev()
277 return self._changectx.rev()
278 if '_changeid' in self.__dict__:
278 if '_changeid' in self.__dict__:
279 return self._changectx.rev()
279 return self._changectx.rev()
280 return self._filelog.linkrev(self._filerev)
280 return self._filelog.linkrev(self._filerev)
281
281
282 def linkrev(self): return self._filelog.linkrev(self._filerev)
282 def linkrev(self): return self._filelog.linkrev(self._filerev)
283 def node(self): return self._changectx.node()
283 def node(self): return self._changectx.node()
284 def user(self): return self._changectx.user()
284 def user(self): return self._changectx.user()
285 def date(self): return self._changectx.date()
285 def date(self): return self._changectx.date()
286 def files(self): return self._changectx.files()
286 def files(self): return self._changectx.files()
287 def description(self): return self._changectx.description()
287 def description(self): return self._changectx.description()
288 def branch(self): return self._changectx.branch()
288 def branch(self): return self._changectx.branch()
289 def manifest(self): return self._changectx.manifest()
289 def manifest(self): return self._changectx.manifest()
290 def changectx(self): return self._changectx
290 def changectx(self): return self._changectx
291
291
292 def data(self): return self._filelog.read(self._filenode)
292 def data(self): return self._filelog.read(self._filenode)
293 def path(self): return self._path
293 def path(self): return self._path
294 def size(self): return self._filelog.size(self._filerev)
294 def size(self): return self._filelog.size(self._filerev)
295
295
296 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
296 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
297
297
298 def renamed(self):
298 def renamed(self):
299 """check if file was actually renamed in this changeset revision
299 """check if file was actually renamed in this changeset revision
300
300
301 If rename logged in file revision, we report copy for changeset only
301 If rename logged in file revision, we report copy for changeset only
302 if file revisions linkrev points back to the changeset in question
302 if file revisions linkrev points back to the changeset in question
303 or both changeset parents contain different file revisions.
303 or both changeset parents contain different file revisions.
304 """
304 """
305
305
306 renamed = self._filelog.renamed(self._filenode)
306 renamed = self._filelog.renamed(self._filenode)
307 if not renamed:
307 if not renamed:
308 return renamed
308 return renamed
309
309
310 if self.rev() == self.linkrev():
310 if self.rev() == self.linkrev():
311 return renamed
311 return renamed
312
312
313 name = self.path()
313 name = self.path()
314 fnode = self._filenode
314 fnode = self._filenode
315 for p in self._changectx.parents():
315 for p in self._changectx.parents():
316 try:
316 try:
317 if fnode == p.filenode(name):
317 if fnode == p.filenode(name):
318 return None
318 return None
319 except error.LookupError:
319 except error.LookupError:
320 pass
320 pass
321 return renamed
321 return renamed
322
322
323 def parents(self):
323 def parents(self):
324 p = self._path
324 p = self._path
325 fl = self._filelog
325 fl = self._filelog
326 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
326 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
327
327
328 r = self._filelog.renamed(self._filenode)
328 r = self._filelog.renamed(self._filenode)
329 if r:
329 if r:
330 pl[0] = (r[0], r[1], None)
330 pl[0] = (r[0], r[1], None)
331
331
332 return [filectx(self._repo, p, fileid=n, filelog=l)
332 return [filectx(self._repo, p, fileid=n, filelog=l)
333 for p,n,l in pl if n != nullid]
333 for p,n,l in pl if n != nullid]
334
334
335 def children(self):
335 def children(self):
336 # hard for renames
336 # hard for renames
337 c = self._filelog.children(self._filenode)
337 c = self._filelog.children(self._filenode)
338 return [filectx(self._repo, self._path, fileid=x,
338 return [filectx(self._repo, self._path, fileid=x,
339 filelog=self._filelog) for x in c]
339 filelog=self._filelog) for x in c]
340
340
341 def annotate(self, follow=False, linenumber=None):
341 def annotate(self, follow=False, linenumber=None):
342 '''returns a list of tuples of (ctx, line) for each line
342 '''returns a list of tuples of (ctx, line) for each line
343 in the file, where ctx is the filectx of the node where
343 in the file, where ctx is the filectx of the node where
344 that line was last changed.
344 that line was last changed.
345 This returns tuples of ((ctx, linenumber), line) for each line,
345 This returns tuples of ((ctx, linenumber), line) for each line,
346 if "linenumber" parameter is NOT "None".
346 if "linenumber" parameter is NOT "None".
347 In such tuples, linenumber means one at the first appearance
347 In such tuples, linenumber means one at the first appearance
348 in the managed file.
348 in the managed file.
349 To reduce annotation cost,
349 To reduce annotation cost,
350 this returns fixed value(False is used) as linenumber,
350 this returns fixed value(False is used) as linenumber,
351 if "linenumber" parameter is "False".'''
351 if "linenumber" parameter is "False".'''
352
352
353 def decorate_compat(text, rev):
353 def decorate_compat(text, rev):
354 return ([rev] * len(text.splitlines()), text)
354 return ([rev] * len(text.splitlines()), text)
355
355
356 def without_linenumber(text, rev):
356 def without_linenumber(text, rev):
357 return ([(rev, False)] * len(text.splitlines()), text)
357 return ([(rev, False)] * len(text.splitlines()), text)
358
358
359 def with_linenumber(text, rev):
359 def with_linenumber(text, rev):
360 size = len(text.splitlines())
360 size = len(text.splitlines())
361 return ([(rev, i) for i in xrange(1, size + 1)], text)
361 return ([(rev, i) for i in xrange(1, size + 1)], text)
362
362
363 decorate = (((linenumber is None) and decorate_compat) or
363 decorate = (((linenumber is None) and decorate_compat) or
364 (linenumber and with_linenumber) or
364 (linenumber and with_linenumber) or
365 without_linenumber)
365 without_linenumber)
366
366
367 def pair(parent, child):
367 def pair(parent, child):
368 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
368 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
369 child[0][b1:b2] = parent[0][a1:a2]
369 child[0][b1:b2] = parent[0][a1:a2]
370 return child
370 return child
371
371
372 getlog = util.cachefunc(lambda x: self._repo.file(x))
372 getlog = util.cachefunc(lambda x: self._repo.file(x))
373 def getctx(path, fileid):
373 def getctx(path, fileid):
374 log = path == self._path and self._filelog or getlog(path)
374 log = path == self._path and self._filelog or getlog(path)
375 return filectx(self._repo, path, fileid=fileid, filelog=log)
375 return filectx(self._repo, path, fileid=fileid, filelog=log)
376 getctx = util.cachefunc(getctx)
376 getctx = util.cachefunc(getctx)
377
377
378 def parents(f):
378 def parents(f):
379 # we want to reuse filectx objects as much as possible
379 # we want to reuse filectx objects as much as possible
380 p = f._path
380 p = f._path
381 if f._filerev is None: # working dir
381 if f._filerev is None: # working dir
382 pl = [(n.path(), n.filerev()) for n in f.parents()]
382 pl = [(n.path(), n.filerev()) for n in f.parents()]
383 else:
383 else:
384 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
384 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
385
385
386 if follow:
386 if follow:
387 r = f.renamed()
387 r = f.renamed()
388 if r:
388 if r:
389 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
389 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
390
390
391 return [getctx(p, n) for p, n in pl if n != nullrev]
391 return [getctx(p, n) for p, n in pl if n != nullrev]
392
392
393 # use linkrev to find the first changeset where self appeared
393 # use linkrev to find the first changeset where self appeared
394 if self.rev() != self.linkrev():
394 if self.rev() != self.linkrev():
395 base = self.filectx(self.filerev())
395 base = self.filectx(self.filerev())
396 else:
396 else:
397 base = self
397 base = self
398
398
399 # find all ancestors
399 # find all ancestors
400 needed = {base: 1}
400 needed = {base: 1}
401 visit = [base]
401 visit = [base]
402 files = [base._path]
402 files = [base._path]
403 while visit:
403 while visit:
404 f = visit.pop(0)
404 f = visit.pop(0)
405 for p in parents(f):
405 for p in parents(f):
406 if p not in needed:
406 if p not in needed:
407 needed[p] = 1
407 needed[p] = 1
408 visit.append(p)
408 visit.append(p)
409 if p._path not in files:
409 if p._path not in files:
410 files.append(p._path)
410 files.append(p._path)
411 else:
411 else:
412 # count how many times we'll use this
412 # count how many times we'll use this
413 needed[p] += 1
413 needed[p] += 1
414
414
415 # sort by revision (per file) which is a topological order
415 # sort by revision (per file) which is a topological order
416 visit = []
416 visit = []
417 for f in files:
417 for f in files:
418 fn = [(n.rev(), n) for n in needed if n._path == f]
418 fn = [(n.rev(), n) for n in needed if n._path == f]
419 visit.extend(fn)
419 visit.extend(fn)
420
420
421 hist = {}
421 hist = {}
422 for r, f in util.sort(visit):
422 for r, f in util.sort(visit):
423 curr = decorate(f.data(), f)
423 curr = decorate(f.data(), f)
424 for p in parents(f):
424 for p in parents(f):
425 if p != nullid:
425 if p != nullid:
426 curr = pair(hist[p], curr)
426 curr = pair(hist[p], curr)
427 # trim the history of unneeded revs
427 # trim the history of unneeded revs
428 needed[p] -= 1
428 needed[p] -= 1
429 if not needed[p]:
429 if not needed[p]:
430 del hist[p]
430 del hist[p]
431 hist[f] = curr
431 hist[f] = curr
432
432
433 return zip(hist[f][0], hist[f][1].splitlines(1))
433 return zip(hist[f][0], hist[f][1].splitlines(1))
434
434
435 def ancestor(self, fc2):
435 def ancestor(self, fc2):
436 """
436 """
437 find the common ancestor file context, if any, of self, and fc2
437 find the common ancestor file context, if any, of self, and fc2
438 """
438 """
439
439
440 acache = {}
440 acache = {}
441
441
442 # prime the ancestor cache for the working directory
442 # prime the ancestor cache for the working directory
443 for c in (self, fc2):
443 for c in (self, fc2):
444 if c._filerev == None:
444 if c._filerev == None:
445 pl = [(n.path(), n.filenode()) for n in c.parents()]
445 pl = [(n.path(), n.filenode()) for n in c.parents()]
446 acache[(c._path, None)] = pl
446 acache[(c._path, None)] = pl
447
447
448 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
448 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
449 def parents(vertex):
449 def parents(vertex):
450 if vertex in acache:
450 if vertex in acache:
451 return acache[vertex]
451 return acache[vertex]
452 f, n = vertex
452 f, n = vertex
453 if f not in flcache:
453 if f not in flcache:
454 flcache[f] = self._repo.file(f)
454 flcache[f] = self._repo.file(f)
455 fl = flcache[f]
455 fl = flcache[f]
456 pl = [(f, p) for p in fl.parents(n) if p != nullid]
456 pl = [(f, p) for p in fl.parents(n) if p != nullid]
457 re = fl.renamed(n)
457 re = fl.renamed(n)
458 if re:
458 if re:
459 pl.append(re)
459 pl.append(re)
460 acache[vertex] = pl
460 acache[vertex] = pl
461 return pl
461 return pl
462
462
463 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
463 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
464 v = ancestor.ancestor(a, b, parents)
464 v = ancestor.ancestor(a, b, parents)
465 if v:
465 if v:
466 f, n = v
466 f, n = v
467 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
467 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
468
468
469 return None
469 return None
470
470
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    parents - a pair of parent nodeids, or None to use the dirstate.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, parents=None, text="", user=None, date=None,
                 extra=None, changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # the attributes below are only set eagerly when the caller
        # supplies them; otherwise the propertycache fallbacks further
        # down compute them lazily on first access
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if parents:
            self._parents = [changectx(self._repo, p) for p in parents]
        if changes:
            self._status = list(changes)

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            branch = self._repo.dirstate.branch()
            try:
                # decode/encode round-trip validates the name is UTF-8
                branch = branch.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def __contains__(self, key):
        # a file is "in" the working context unless untracked ('?')
        # or marked removed ('r') in the dirstate
        return self._repo.dirstate[key] not in "?r"

    def _manifest(self):
        """generate a manifest corresponding to the working directory"""

        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        # flag lookup falls back to the copy source's flags
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file vanished while we were looking; skip flags
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        return man
    _manifest = propertycache(_manifest)

    def _status(self):
        return self._repo.status(unknown=True)
    _status = propertycache(_status)

    def _user(self):
        return self._repo.ui.username()
    _user = propertycache(_user)

    def _date(self):
        return util.makedate()
    _date = propertycache(_date)

    def _parents(self):
        # drop a null second parent so len(parents()) reflects reality
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        self._parents = [changectx(self._repo, x) for x in p]
        return self._parents
    _parents = propertycache(_parents)

    def manifest(self):
        return self._manifest

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        # modified + added + removed, sorted
        return util.sort(self._status[0] + self._status[1] + self._status[2])

    def modified(self):
        return self._status[0]

    def added(self):
        return self._status[1]

    def removed(self):
        return self._status[2]

    def deleted(self):
        return self._status[3]

    def unknown(self):
        return self._status[4]

    def clean(self):
        return self._status[5]

    def branch(self):
        return self._extra['branch']

    def extra(self):
        return self._extra

    def tags(self):
        # the working directory carries the union of its parents' tags
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            # cheap path: the working manifest is already built
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        # otherwise consult the first parent's manifest (following a
        # pending copy back to its source) and probe the filesystem
        pnode = self._parents[0].changeset()[0]
        orig = self._repo.dirstate.copies().get(path, path)
        node, flag = self._repo.manifest.find(pnode, orig)
        try:
            ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
            return ff(path)
        except OSError:
            pass

        if not node or path in self.deleted() or path in self.removed():
            return ''
        return flag

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617
617
618 class workingfilectx(filectx):
618 class workingfilectx(filectx):
619 """A workingfilectx object makes access to data related to a particular
619 """A workingfilectx object makes access to data related to a particular
620 file in the working directory convenient."""
620 file in the working directory convenient."""
621 def __init__(self, repo, path, filelog=None, workingctx=None):
621 def __init__(self, repo, path, filelog=None, workingctx=None):
622 """changeid can be a changeset revision, node, or tag.
622 """changeid can be a changeset revision, node, or tag.
623 fileid can be a file revision or node."""
623 fileid can be a file revision or node."""
624 self._repo = repo
624 self._repo = repo
625 self._path = path
625 self._path = path
626 self._changeid = None
626 self._changeid = None
627 self._filerev = self._filenode = None
627 self._filerev = self._filenode = None
628
628
629 if filelog:
629 if filelog:
630 self._filelog = filelog
630 self._filelog = filelog
631 if workingctx:
631 if workingctx:
632 self._changectx = workingctx
632 self._changectx = workingctx
633
633
634 def _changectx(self):
634 def _changectx(self):
635 return workingctx(self._repo)
635 return workingctx(self._repo)
636 _changectx = propertycache(_changectx)
636 _changectx = propertycache(_changectx)
637
637
638 def _repopath(self):
638 def _repopath(self):
639 return self._repo.dirstate.copied(self._path) or self._path
639 return self._repo.dirstate.copied(self._path) or self._path
640 _repopath = propertycache(_repopath)
640 _repopath = propertycache(_repopath)
641
641
642 def _filelog(self):
642 def _filelog(self):
643 return self._repo.file(self._repopath)
643 return self._repo.file(self._repopath)
644 _filelog = propertycache(_filelog)
644 _filelog = propertycache(_filelog)
645
645
646 def __nonzero__(self):
646 def __nonzero__(self):
647 return True
647 return True
648
648
649 def __str__(self):
649 def __str__(self):
650 return "%s@%s" % (self.path(), self._changectx)
650 return "%s@%s" % (self.path(), self._changectx)
651
651
652 def filectx(self, fileid):
652 def filectx(self, fileid):
653 '''opens an arbitrary revision of the file without
653 '''opens an arbitrary revision of the file without
654 opening a new filelog'''
654 opening a new filelog'''
655 return filectx(self._repo, self._repopath, fileid=fileid,
655 return filectx(self._repo, self._repopath, fileid=fileid,
656 filelog=self._filelog)
656 filelog=self._filelog)
657
657
658 def rev(self):
658 def rev(self):
659 if '_changectx' in self.__dict__:
659 if '_changectx' in self.__dict__:
660 return self._changectx.rev()
660 return self._changectx.rev()
661 return self._filelog.linkrev(self._filerev)
661 return self._filelog.linkrev(self._filerev)
662
662
663 def data(self): return self._repo.wread(self._path)
663 def data(self): return self._repo.wread(self._path)
664 def renamed(self):
664 def renamed(self):
665 rp = self._repopath
665 rp = self._repopath
666 if rp == self._path:
666 if rp == self._path:
667 return None
667 return None
668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
669
669
670 def parents(self):
670 def parents(self):
671 '''return parent filectxs, following copies if necessary'''
671 '''return parent filectxs, following copies if necessary'''
672 p = self._path
672 p = self._path
673 rp = self._repopath
673 rp = self._repopath
674 pcl = self._changectx._parents
674 pcl = self._changectx._parents
675 fl = self._filelog
675 fl = self._filelog
676 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
676 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
677 if len(pcl) > 1:
677 if len(pcl) > 1:
678 if rp != p:
678 if rp != p:
679 fl = None
679 fl = None
680 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
680 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
681
681
682 return [filectx(self._repo, p, fileid=n, filelog=l)
682 return [filectx(self._repo, p, fileid=n, filelog=l)
683 for p,n,l in pl if n != nullid]
683 for p,n,l in pl if n != nullid]
684
684
685 def children(self):
685 def children(self):
686 return []
686 return []
687
687
688 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
688 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
689 def date(self):
689 def date(self):
690 t, tz = self._changectx.date()
690 t, tz = self._changectx.date()
691 try:
691 try:
692 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
692 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
693 except OSError, err:
693 except OSError, err:
694 if err.errno != errno.ENOENT: raise
694 if err.errno != errno.ENOENT: raise
695 return (t, tz)
695 return (t, tz)
696
696
697 def cmp(self, text): return self._repo.wread(self._path) == text
697 def cmp(self, text): return self._repo.wread(self._path) == text
698
698
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        # replace missing parents with the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = util.sort(set(files))
        # status layout: [modified, added, removed, deleted, unknown,
        # clean]; only 'modified' is populated for an in-memory commit.
        # The trailing clean list is required so clean() (index 5)
        # does not raise IndexError.
        self._status = [files, [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return self.modified()

    def modified(self):
        return self._status[0]

    def added(self):
        return self._status[1]

    def removed(self):
        return self._status[2]

    def deleted(self):
        return self._status[3]

    def unknown(self):
        return self._status[4]

    def clean(self):
        return self._status[5]

    def branch(self):
        return self._extra['branch']

    def extra(self):
        return self._extra

    def flags(self, f):
        return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)
777
777
class memfilectx(object):
    """memfilectx represents an in-memory file to commit.

    See memctx for more details.
    """
    def __init__(self, path, data, islink, isexec, copied):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        # manifest-style flag string: 'l' for symlink, 'x' for exec
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        return self._copied
806
806
@@ -1,585 +1,585 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import nullid
10 from node import nullid
11 from i18n import _
11 from i18n import _
12 import struct, os, stat, util, errno, ignore
12 import struct, os, stat, util, errno, ignore
13 import cStringIO, osutil, sys, parsers
13 import cStringIO, osutil, sys, parsers
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 def _finddirs(path):
18 def _finddirs(path):
19 pos = path.rfind('/')
19 pos = path.rfind('/')
20 while pos != -1:
20 while pos != -1:
21 yield path[:pos]
21 yield path[:pos]
22 pos = path.rfind('/', 0, pos)
22 pos = path.rfind('/', 0, pos)
23
23
def _incdirs(dirs, path):
    """Increment refcounts in dirs for every ancestor directory of path.

    Stops at the first ancestor already present: its own ancestors were
    necessarily counted when it was first inserted, so only genuinely
    new prefixes get a fresh count of 1.
    """
    for base in _finddirs(path):
        if base in dirs:
            dirs[base] += 1
            return
        dirs[base] = 1
30
30
def _decdirs(dirs, path):
    """Decrement refcounts in dirs for every ancestor directory of path.

    Mirror of _incdirs: stops at the first ancestor whose count stays
    positive; a count reaching zero deletes the entry entirely.
    """
    for base in _finddirs(path):
        if dirs[base] > 1:
            dirs[base] -= 1
            return
        del dirs[base]
37
37
38 class dirstate(object):
38 class dirstate(object):
39
39
40 def __init__(self, opener, ui, root):
40 def __init__(self, opener, ui, root):
41 self._opener = opener
41 self._opener = opener
42 self._root = root
42 self._root = root
43 self._rootdir = os.path.join(root, '')
43 self._rootdir = os.path.join(root, '')
44 self._dirty = False
44 self._dirty = False
45 self._dirtypl = False
45 self._dirtypl = False
46 self._ui = ui
46 self._ui = ui
47
47
48 def __getattr__(self, name):
48 def __getattr__(self, name):
49 if name == '_map':
49 if name == '_map':
50 self._read()
50 self._read()
51 return self._map
51 return self._map
52 elif name == '_copymap':
52 elif name == '_copymap':
53 self._read()
53 self._read()
54 return self._copymap
54 return self._copymap
55 elif name == '_foldmap':
55 elif name == '_foldmap':
56 _foldmap = {}
56 _foldmap = {}
57 for name in self._map:
57 for name in self._map:
58 norm = os.path.normcase(name)
58 norm = os.path.normcase(name)
59 _foldmap[norm] = name
59 _foldmap[norm] = name
60 self._foldmap = _foldmap
60 self._foldmap = _foldmap
61 return self._foldmap
61 return self._foldmap
62 elif name == '_branch':
62 elif name == '_branch':
63 try:
63 try:
64 self._branch = (self._opener("branch").read().strip()
64 self._branch = (self._opener("branch").read().strip()
65 or "default")
65 or "default")
66 except IOError:
66 except IOError:
67 self._branch = "default"
67 self._branch = "default"
68 return self._branch
68 return self._branch
69 elif name == '_pl':
69 elif name == '_pl':
70 self._pl = [nullid, nullid]
70 self._pl = [nullid, nullid]
71 try:
71 try:
72 st = self._opener("dirstate").read(40)
72 st = self._opener("dirstate").read(40)
73 if len(st) == 40:
73 if len(st) == 40:
74 self._pl = st[:20], st[20:40]
74 self._pl = st[:20], st[20:40]
75 except IOError, err:
75 except IOError, err:
76 if err.errno != errno.ENOENT: raise
76 if err.errno != errno.ENOENT: raise
77 return self._pl
77 return self._pl
78 elif name == '_dirs':
78 elif name == '_dirs':
79 dirs = {}
79 dirs = {}
80 for f,s in self._map.iteritems():
80 for f,s in self._map.iteritems():
81 if s[0] != 'r':
81 if s[0] != 'r':
82 _incdirs(dirs, f)
82 _incdirs(dirs, f)
83 self._dirs = dirs
83 self._dirs = dirs
84 return self._dirs
84 return self._dirs
85 elif name == '_ignore':
85 elif name == '_ignore':
86 files = [self._join('.hgignore')]
86 files = [self._join('.hgignore')]
87 for name, path in self._ui.configitems("ui"):
87 for name, path in self._ui.configitems("ui"):
88 if name == 'ignore' or name.startswith('ignore.'):
88 if name == 'ignore' or name.startswith('ignore.'):
89 files.append(os.path.expanduser(path))
89 files.append(os.path.expanduser(path))
90 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
90 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
91 return self._ignore
91 return self._ignore
92 elif name == '_slash':
92 elif name == '_slash':
93 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
93 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
94 return self._slash
94 return self._slash
95 elif name == '_checklink':
95 elif name == '_checklink':
96 self._checklink = util.checklink(self._root)
96 self._checklink = util.checklink(self._root)
97 return self._checklink
97 return self._checklink
98 elif name == '_checkexec':
98 elif name == '_checkexec':
99 self._checkexec = util.checkexec(self._root)
99 self._checkexec = util.checkexec(self._root)
100 return self._checkexec
100 return self._checkexec
101 elif name == '_checkcase':
101 elif name == '_checkcase':
102 self._checkcase = not util.checkcase(self._join('.hg'))
102 self._checkcase = not util.checkcase(self._join('.hg'))
103 return self._checkcase
103 return self._checkcase
104 elif name == 'normalize':
104 elif name == 'normalize':
105 if self._checkcase:
105 if self._checkcase:
106 self.normalize = self._normalize
106 self.normalize = self._normalize
107 else:
107 else:
108 self.normalize = lambda x, y=False: x
108 self.normalize = lambda x, y=False: x
109 return self.normalize
109 return self.normalize
110 else:
110 else:
111 raise AttributeError(name)
111 raise AttributeError(name)
112
112
113 def _join(self, f):
113 def _join(self, f):
114 # much faster than os.path.join()
114 # much faster than os.path.join()
115 # it's safe because f is always a relative path
115 # it's safe because f is always a relative path
116 return self._rootdir + f
116 return self._rootdir + f
117
117
118 def flagfunc(self, fallback):
118 def flagfunc(self, fallback):
119 if self._checklink:
119 if self._checklink:
120 if self._checkexec:
120 if self._checkexec:
121 def f(x):
121 def f(x):
122 p = self._join(x)
122 p = self._join(x)
123 if os.path.islink(p):
123 if os.path.islink(p):
124 return 'l'
124 return 'l'
125 if util.is_exec(p):
125 if util.is_exec(p):
126 return 'x'
126 return 'x'
127 return ''
127 return ''
128 return f
128 return f
129 def f(x):
129 def f(x):
130 if os.path.islink(self._join(x)):
130 if os.path.islink(self._join(x)):
131 return 'l'
131 return 'l'
132 if 'x' in fallback(x):
132 if 'x' in fallback(x):
133 return 'x'
133 return 'x'
134 return ''
134 return ''
135 return f
135 return f
136 if self._checkexec:
136 if self._checkexec:
137 def f(x):
137 def f(x):
138 if 'l' in fallback(x):
138 if 'l' in fallback(x):
139 return 'l'
139 return 'l'
140 if util.is_exec(self._join(x)):
140 if util.is_exec(self._join(x)):
141 return 'x'
141 return 'x'
142 return ''
142 return ''
143 return f
143 return f
144 return fallback
144 return fallback
145
145
146 def getcwd(self):
146 def getcwd(self):
147 cwd = os.getcwd()
147 cwd = os.getcwd()
148 if cwd == self._root: return ''
148 if cwd == self._root: return ''
149 # self._root ends with a path separator if self._root is '/' or 'C:\'
149 # self._root ends with a path separator if self._root is '/' or 'C:\'
150 rootsep = self._root
150 rootsep = self._root
151 if not util.endswithsep(rootsep):
151 if not util.endswithsep(rootsep):
152 rootsep += os.sep
152 rootsep += os.sep
153 if cwd.startswith(rootsep):
153 if cwd.startswith(rootsep):
154 return cwd[len(rootsep):]
154 return cwd[len(rootsep):]
155 else:
155 else:
156 # we're outside the repo. return an absolute path.
156 # we're outside the repo. return an absolute path.
157 return cwd
157 return cwd
158
158
159 def pathto(self, f, cwd=None):
159 def pathto(self, f, cwd=None):
160 if cwd is None:
160 if cwd is None:
161 cwd = self.getcwd()
161 cwd = self.getcwd()
162 path = util.pathto(self._root, cwd, f)
162 path = util.pathto(self._root, cwd, f)
163 if self._slash:
163 if self._slash:
164 return util.normpath(path)
164 return util.normpath(path)
165 return path
165 return path
166
166
167 def __getitem__(self, key):
167 def __getitem__(self, key):
168 ''' current states:
168 ''' current states:
169 n normal
169 n normal
170 m needs merging
170 m needs merging
171 r marked for removal
171 r marked for removal
172 a marked for addition
172 a marked for addition
173 ? not tracked'''
173 ? not tracked'''
174 return self._map.get(key, ("?",))[0]
174 return self._map.get(key, ("?",))[0]
175
175
176 def __contains__(self, key):
176 def __contains__(self, key):
177 return key in self._map
177 return key in self._map
178
178
179 def __iter__(self):
179 def __iter__(self):
180 for x in util.sort(self._map):
180 for x in util.sort(self._map):
181 yield x
181 yield x
182
182
183 def parents(self):
183 def parents(self):
184 return self._pl
184 return self._pl
185
185
186 def branch(self):
186 def branch(self):
187 return self._branch
187 return self._branch
188
188
189 def setparents(self, p1, p2=nullid):
189 def setparents(self, p1, p2=nullid):
190 self._dirty = self._dirtypl = True
190 self._dirty = self._dirtypl = True
191 self._pl = p1, p2
191 self._pl = p1, p2
192
192
193 def setbranch(self, branch):
193 def setbranch(self, branch):
194 self._branch = branch
194 self._branch = branch
195 self._opener("branch", "w").write(branch + '\n')
195 self._opener("branch", "w").write(branch + '\n')
196
196
197 def _read(self):
197 def _read(self):
198 self._map = {}
198 self._map = {}
199 self._copymap = {}
199 self._copymap = {}
200 try:
200 try:
201 st = self._opener("dirstate").read()
201 st = self._opener("dirstate").read()
202 except IOError, err:
202 except IOError, err:
203 if err.errno != errno.ENOENT: raise
203 if err.errno != errno.ENOENT: raise
204 return
204 return
205 if not st:
205 if not st:
206 return
206 return
207
207
208 p = parsers.parse_dirstate(self._map, self._copymap, st)
208 p = parsers.parse_dirstate(self._map, self._copymap, st)
209 if not self._dirtypl:
209 if not self._dirtypl:
210 self._pl = p
210 self._pl = p
211
211
212 def invalidate(self):
212 def invalidate(self):
213 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
213 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
214 if a in self.__dict__:
214 if a in self.__dict__:
215 delattr(self, a)
215 delattr(self, a)
216 self._dirty = False
216 self._dirty = False
217
217
218 def copy(self, source, dest):
218 def copy(self, source, dest):
219 """Mark dest as a copy of source. Unmark dest if source is None.
219 """Mark dest as a copy of source. Unmark dest if source is None.
220 """
220 """
221 if source == dest:
221 if source == dest:
222 return
222 return
223 self._dirty = True
223 self._dirty = True
224 if source is not None:
224 if source is not None:
225 self._copymap[dest] = source
225 self._copymap[dest] = source
226 elif dest in self._copymap:
226 elif dest in self._copymap:
227 del self._copymap[dest]
227 del self._copymap[dest]
228
228
229 def copied(self, file):
229 def copied(self, file):
230 return self._copymap.get(file, None)
230 return self._copymap.get(file, None)
231
231
232 def copies(self):
232 def copies(self):
233 return self._copymap
233 return self._copymap
234
234
235 def _droppath(self, f):
235 def _droppath(self, f):
236 if self[f] not in "?r" and "_dirs" in self.__dict__:
236 if self[f] not in "?r" and "_dirs" in self.__dict__:
237 _decdirs(self._dirs, f)
237 _decdirs(self._dirs, f)
238
238
239 def _addpath(self, f, check=False):
239 def _addpath(self, f, check=False):
240 oldstate = self[f]
240 oldstate = self[f]
241 if check or oldstate == "r":
241 if check or oldstate == "r":
242 if '\r' in f or '\n' in f:
242 if '\r' in f or '\n' in f:
243 raise util.Abort(
243 raise util.Abort(
244 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
244 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
245 if f in self._dirs:
245 if f in self._dirs:
246 raise util.Abort(_('directory %r already in dirstate') % f)
246 raise util.Abort(_('directory %r already in dirstate') % f)
247 # shadows
247 # shadows
248 for d in _finddirs(f):
248 for d in _finddirs(f):
249 if d in self._dirs:
249 if d in self._dirs:
250 break
250 break
251 if d in self._map and self[d] != 'r':
251 if d in self._map and self[d] != 'r':
252 raise util.Abort(
252 raise util.Abort(
253 _('file %r in dirstate clashes with %r') % (d, f))
253 _('file %r in dirstate clashes with %r') % (d, f))
254 if oldstate in "?r" and "_dirs" in self.__dict__:
254 if oldstate in "?r" and "_dirs" in self.__dict__:
255 _incdirs(self._dirs, f)
255 _incdirs(self._dirs, f)
256
256
257 def normal(self, f):
257 def normal(self, f):
258 'mark a file normal and clean'
258 'mark a file normal and clean'
259 self._dirty = True
259 self._dirty = True
260 self._addpath(f)
260 self._addpath(f)
261 s = os.lstat(self._join(f))
261 s = os.lstat(self._join(f))
262 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
262 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
263 if f in self._copymap:
263 if f in self._copymap:
264 del self._copymap[f]
264 del self._copymap[f]
265
265
266 def normallookup(self, f):
266 def normallookup(self, f):
267 'mark a file normal, but possibly dirty'
267 'mark a file normal, but possibly dirty'
268 if self._pl[1] != nullid and f in self._map:
268 if self._pl[1] != nullid and f in self._map:
269 # if there is a merge going on and the file was either
269 # if there is a merge going on and the file was either
270 # in state 'm' or dirty before being removed, restore that state.
270 # in state 'm' or dirty before being removed, restore that state.
271 entry = self._map[f]
271 entry = self._map[f]
272 if entry[0] == 'r' and entry[2] in (-1, -2):
272 if entry[0] == 'r' and entry[2] in (-1, -2):
273 source = self._copymap.get(f)
273 source = self._copymap.get(f)
274 if entry[2] == -1:
274 if entry[2] == -1:
275 self.merge(f)
275 self.merge(f)
276 elif entry[2] == -2:
276 elif entry[2] == -2:
277 self.normaldirty(f)
277 self.normaldirty(f)
278 if source:
278 if source:
279 self.copy(source, f)
279 self.copy(source, f)
280 return
280 return
281 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
281 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
282 return
282 return
283 self._dirty = True
283 self._dirty = True
284 self._addpath(f)
284 self._addpath(f)
285 self._map[f] = ('n', 0, -1, -1)
285 self._map[f] = ('n', 0, -1, -1)
286 if f in self._copymap:
286 if f in self._copymap:
287 del self._copymap[f]
287 del self._copymap[f]
288
288
289 def normaldirty(self, f):
289 def normaldirty(self, f):
290 'mark a file normal, but dirty'
290 'mark a file normal, but dirty'
291 self._dirty = True
291 self._dirty = True
292 self._addpath(f)
292 self._addpath(f)
293 self._map[f] = ('n', 0, -2, -1)
293 self._map[f] = ('n', 0, -2, -1)
294 if f in self._copymap:
294 if f in self._copymap:
295 del self._copymap[f]
295 del self._copymap[f]
296
296
297 def add(self, f):
297 def add(self, f):
298 'mark a file added'
298 'mark a file added'
299 self._dirty = True
299 self._dirty = True
300 self._addpath(f, True)
300 self._addpath(f, True)
301 self._map[f] = ('a', 0, -1, -1)
301 self._map[f] = ('a', 0, -1, -1)
302 if f in self._copymap:
302 if f in self._copymap:
303 del self._copymap[f]
303 del self._copymap[f]
304
304
305 def remove(self, f):
305 def remove(self, f):
306 'mark a file removed'
306 'mark a file removed'
307 self._dirty = True
307 self._dirty = True
308 self._droppath(f)
308 self._droppath(f)
309 size = 0
309 size = 0
310 if self._pl[1] != nullid and f in self._map:
310 if self._pl[1] != nullid and f in self._map:
311 entry = self._map[f]
311 entry = self._map[f]
312 if entry[0] == 'm':
312 if entry[0] == 'm':
313 size = -1
313 size = -1
314 elif entry[0] == 'n' and entry[2] == -2:
314 elif entry[0] == 'n' and entry[2] == -2:
315 size = -2
315 size = -2
316 self._map[f] = ('r', 0, size, 0)
316 self._map[f] = ('r', 0, size, 0)
317 if size == 0 and f in self._copymap:
317 if size == 0 and f in self._copymap:
318 del self._copymap[f]
318 del self._copymap[f]
319
319
320 def merge(self, f):
320 def merge(self, f):
321 'mark a file merged'
321 'mark a file merged'
322 self._dirty = True
322 self._dirty = True
323 s = os.lstat(self._join(f))
323 s = os.lstat(self._join(f))
324 self._addpath(f)
324 self._addpath(f)
325 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
325 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
326 if f in self._copymap:
326 if f in self._copymap:
327 del self._copymap[f]
327 del self._copymap[f]
328
328
329 def forget(self, f):
329 def forget(self, f):
330 'forget a file'
330 'forget a file'
331 self._dirty = True
331 self._dirty = True
332 try:
332 try:
333 self._droppath(f)
333 self._droppath(f)
334 del self._map[f]
334 del self._map[f]
335 except KeyError:
335 except KeyError:
336 self._ui.warn(_("not in dirstate: %s\n") % f)
336 self._ui.warn(_("not in dirstate: %s\n") % f)
337
337
338 def _normalize(self, path, knownpath=False):
338 def _normalize(self, path, knownpath=False):
339 norm_path = os.path.normcase(path)
339 norm_path = os.path.normcase(path)
340 fold_path = self._foldmap.get(norm_path, None)
340 fold_path = self._foldmap.get(norm_path, None)
341 if fold_path is None:
341 if fold_path is None:
342 if knownpath or not os.path.exists(os.path.join(self._root, path)):
342 if knownpath or not os.path.exists(os.path.join(self._root, path)):
343 fold_path = path
343 fold_path = path
344 else:
344 else:
345 fold_path = self._foldmap.setdefault(norm_path,
345 fold_path = self._foldmap.setdefault(norm_path,
346 util.fspath(path, self._root))
346 util.fspath(path, self._root))
347 return fold_path
347 return fold_path
348
348
349 def clear(self):
349 def clear(self):
350 self._map = {}
350 self._map = {}
351 if "_dirs" in self.__dict__:
351 if "_dirs" in self.__dict__:
352 delattr(self, "_dirs");
352 delattr(self, "_dirs");
353 self._copymap = {}
353 self._copymap = {}
354 self._pl = [nullid, nullid]
354 self._pl = [nullid, nullid]
355 self._dirty = True
355 self._dirty = True
356
356
357 def rebuild(self, parent, files):
357 def rebuild(self, parent, files):
358 self.clear()
358 self.clear()
359 for f in files:
359 for f in files:
360 if 'x' in files.flags(f):
360 if 'x' in files.flags(f):
361 self._map[f] = ('n', 0777, -1, 0)
361 self._map[f] = ('n', 0777, -1, 0)
362 else:
362 else:
363 self._map[f] = ('n', 0666, -1, 0)
363 self._map[f] = ('n', 0666, -1, 0)
364 self._pl = (parent, nullid)
364 self._pl = (parent, nullid)
365 self._dirty = True
365 self._dirty = True
366
366
367 def write(self):
367 def write(self):
368 if not self._dirty:
368 if not self._dirty:
369 return
369 return
370 st = self._opener("dirstate", "w", atomictemp=True)
370 st = self._opener("dirstate", "w", atomictemp=True)
371
371
372 try:
372 try:
373 gran = int(self._ui.config('dirstate', 'granularity', 1))
373 gran = int(self._ui.config('dirstate', 'granularity', 1))
374 except ValueError:
374 except ValueError:
375 gran = 1
375 gran = 1
376 limit = sys.maxint
376 limit = sys.maxint
377 if gran > 0:
377 if gran > 0:
378 limit = util.fstat(st).st_mtime - gran
378 limit = util.fstat(st).st_mtime - gran
379
379
380 cs = cStringIO.StringIO()
380 cs = cStringIO.StringIO()
381 copymap = self._copymap
381 copymap = self._copymap
382 pack = struct.pack
382 pack = struct.pack
383 write = cs.write
383 write = cs.write
384 write("".join(self._pl))
384 write("".join(self._pl))
385 for f, e in self._map.iteritems():
385 for f, e in self._map.iteritems():
386 if f in copymap:
386 if f in copymap:
387 f = "%s\0%s" % (f, copymap[f])
387 f = "%s\0%s" % (f, copymap[f])
388 if e[3] > limit and e[0] == 'n':
388 if e[3] > limit and e[0] == 'n':
389 e = (e[0], 0, -1, -1)
389 e = (e[0], 0, -1, -1)
390 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
390 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
391 write(e)
391 write(e)
392 write(f)
392 write(f)
393 st.write(cs.getvalue())
393 st.write(cs.getvalue())
394 st.rename()
394 st.rename()
395 self._dirty = self._dirtypl = False
395 self._dirty = self._dirtypl = False
396
396
397 def _dirignore(self, f):
397 def _dirignore(self, f):
398 if f == '.':
398 if f == '.':
399 return False
399 return False
400 if self._ignore(f):
400 if self._ignore(f):
401 return True
401 return True
402 for p in _finddirs(f):
402 for p in _finddirs(f):
403 if self._ignore(p):
403 if self._ignore(p):
404 return True
404 return True
405 return False
405 return False
406
406
407 def walk(self, match, unknown, ignored):
407 def walk(self, match, unknown, ignored):
408 '''
408 '''
409 walk recursively through the directory tree, finding all files
409 walk recursively through the directory tree, finding all files
410 matched by the match function
410 matched by the match function
411
411
412 results are yielded in a tuple (filename, stat), where stat
412 results are yielded in a tuple (filename, stat), where stat
413 and st is the stat result if the file was found in the directory.
413 and st is the stat result if the file was found in the directory.
414 '''
414 '''
415
415
416 def fwarn(f, msg):
416 def fwarn(f, msg):
417 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
417 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
418 return False
418 return False
419 badfn = fwarn
419 badfn = fwarn
420 if hasattr(match, 'bad'):
420 if hasattr(match, 'bad'):
421 badfn = match.bad
421 badfn = match.bad
422
422
423 def badtype(f, mode):
423 def badtype(f, mode):
424 kind = 'unknown'
424 kind = 'unknown'
425 if stat.S_ISCHR(mode): kind = _('character device')
425 if stat.S_ISCHR(mode): kind = _('character device')
426 elif stat.S_ISBLK(mode): kind = _('block device')
426 elif stat.S_ISBLK(mode): kind = _('block device')
427 elif stat.S_ISFIFO(mode): kind = _('fifo')
427 elif stat.S_ISFIFO(mode): kind = _('fifo')
428 elif stat.S_ISSOCK(mode): kind = _('socket')
428 elif stat.S_ISSOCK(mode): kind = _('socket')
429 elif stat.S_ISDIR(mode): kind = _('directory')
429 elif stat.S_ISDIR(mode): kind = _('directory')
430 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
430 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
431 % (self.pathto(f), kind))
431 % (self.pathto(f), kind))
432
432
433 ignore = self._ignore
433 ignore = self._ignore
434 dirignore = self._dirignore
434 dirignore = self._dirignore
435 if ignored:
435 if ignored:
436 ignore = util.never
436 ignore = util.never
437 dirignore = util.never
437 dirignore = util.never
438 elif not unknown:
438 elif not unknown:
439 # if unknown and ignored are False, skip step 2
439 # if unknown and ignored are False, skip step 2
440 ignore = util.always
440 ignore = util.always
441 dirignore = util.always
441 dirignore = util.always
442
442
443 matchfn = match.matchfn
443 matchfn = match.matchfn
444 dmap = self._map
444 dmap = self._map
445 normpath = util.normpath
445 normpath = util.normpath
446 normalize = self.normalize
446 normalize = self.normalize
447 listdir = osutil.listdir
447 listdir = osutil.listdir
448 lstat = os.lstat
448 lstat = os.lstat
449 getkind = stat.S_IFMT
449 getkind = stat.S_IFMT
450 dirkind = stat.S_IFDIR
450 dirkind = stat.S_IFDIR
451 regkind = stat.S_IFREG
451 regkind = stat.S_IFREG
452 lnkkind = stat.S_IFLNK
452 lnkkind = stat.S_IFLNK
453 join = self._join
453 join = self._join
454 work = []
454 work = []
455 wadd = work.append
455 wadd = work.append
456
456
457 files = util.unique(match.files())
457 files = set(match.files())
458 if not files or '.' in files:
458 if not files or '.' in files:
459 files = ['']
459 files = ['']
460 results = {'.hg': None}
460 results = {'.hg': None}
461
461
462 # step 1: find all explicit files
462 # step 1: find all explicit files
463 for ff in util.sort(files):
463 for ff in util.sort(files):
464 nf = normalize(normpath(ff))
464 nf = normalize(normpath(ff))
465 if nf in results:
465 if nf in results:
466 continue
466 continue
467
467
468 try:
468 try:
469 st = lstat(join(nf))
469 st = lstat(join(nf))
470 kind = getkind(st.st_mode)
470 kind = getkind(st.st_mode)
471 if kind == dirkind:
471 if kind == dirkind:
472 if not dirignore(nf):
472 if not dirignore(nf):
473 wadd(nf)
473 wadd(nf)
474 elif kind == regkind or kind == lnkkind:
474 elif kind == regkind or kind == lnkkind:
475 results[nf] = st
475 results[nf] = st
476 else:
476 else:
477 badtype(ff, kind)
477 badtype(ff, kind)
478 if nf in dmap:
478 if nf in dmap:
479 results[nf] = None
479 results[nf] = None
480 except OSError, inst:
480 except OSError, inst:
481 keep = False
481 keep = False
482 prefix = nf + "/"
482 prefix = nf + "/"
483 for fn in dmap:
483 for fn in dmap:
484 if nf == fn or fn.startswith(prefix):
484 if nf == fn or fn.startswith(prefix):
485 keep = True
485 keep = True
486 break
486 break
487 if not keep:
487 if not keep:
488 if inst.errno != errno.ENOENT:
488 if inst.errno != errno.ENOENT:
489 fwarn(ff, inst.strerror)
489 fwarn(ff, inst.strerror)
490 elif badfn(ff, inst.strerror):
490 elif badfn(ff, inst.strerror):
491 if (nf in dmap or not ignore(nf)) and matchfn(nf):
491 if (nf in dmap or not ignore(nf)) and matchfn(nf):
492 results[nf] = None
492 results[nf] = None
493
493
494 # step 2: visit subdirectories
494 # step 2: visit subdirectories
495 while work:
495 while work:
496 nd = work.pop()
496 nd = work.pop()
497 if hasattr(match, 'dir'):
497 if hasattr(match, 'dir'):
498 match.dir(nd)
498 match.dir(nd)
499 skip = None
499 skip = None
500 if nd == '.':
500 if nd == '.':
501 nd = ''
501 nd = ''
502 else:
502 else:
503 skip = '.hg'
503 skip = '.hg'
504 try:
504 try:
505 entries = listdir(join(nd), stat=True, skip=skip)
505 entries = listdir(join(nd), stat=True, skip=skip)
506 except OSError, inst:
506 except OSError, inst:
507 if inst.errno == errno.EACCES:
507 if inst.errno == errno.EACCES:
508 fwarn(nd, inst.strerror)
508 fwarn(nd, inst.strerror)
509 continue
509 continue
510 raise
510 raise
511 for f, kind, st in entries:
511 for f, kind, st in entries:
512 nf = normalize(nd and (nd + "/" + f) or f, True)
512 nf = normalize(nd and (nd + "/" + f) or f, True)
513 if nf not in results:
513 if nf not in results:
514 if kind == dirkind:
514 if kind == dirkind:
515 if not ignore(nf):
515 if not ignore(nf):
516 wadd(nf)
516 wadd(nf)
517 if nf in dmap and matchfn(nf):
517 if nf in dmap and matchfn(nf):
518 results[nf] = None
518 results[nf] = None
519 elif kind == regkind or kind == lnkkind:
519 elif kind == regkind or kind == lnkkind:
520 if nf in dmap:
520 if nf in dmap:
521 if matchfn(nf):
521 if matchfn(nf):
522 results[nf] = st
522 results[nf] = st
523 elif matchfn(nf) and not ignore(nf):
523 elif matchfn(nf) and not ignore(nf):
524 results[nf] = st
524 results[nf] = st
525 elif nf in dmap and matchfn(nf):
525 elif nf in dmap and matchfn(nf):
526 results[nf] = None
526 results[nf] = None
527
527
528 # step 3: report unseen items in the dmap hash
528 # step 3: report unseen items in the dmap hash
529 visit = util.sort([f for f in dmap if f not in results and match(f)])
529 visit = util.sort([f for f in dmap if f not in results and match(f)])
530 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
530 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
531 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
531 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
532 st = None
532 st = None
533 results[nf] = st
533 results[nf] = st
534
534
535 del results['.hg']
535 del results['.hg']
536 return results
536 return results
537
537
538 def status(self, match, ignored, clean, unknown):
538 def status(self, match, ignored, clean, unknown):
539 listignored, listclean, listunknown = ignored, clean, unknown
539 listignored, listclean, listunknown = ignored, clean, unknown
540 lookup, modified, added, unknown, ignored = [], [], [], [], []
540 lookup, modified, added, unknown, ignored = [], [], [], [], []
541 removed, deleted, clean = [], [], []
541 removed, deleted, clean = [], [], []
542
542
543 dmap = self._map
543 dmap = self._map
544 ladd = lookup.append
544 ladd = lookup.append
545 madd = modified.append
545 madd = modified.append
546 aadd = added.append
546 aadd = added.append
547 uadd = unknown.append
547 uadd = unknown.append
548 iadd = ignored.append
548 iadd = ignored.append
549 radd = removed.append
549 radd = removed.append
550 dadd = deleted.append
550 dadd = deleted.append
551 cadd = clean.append
551 cadd = clean.append
552
552
553 for fn, st in self.walk(match, listunknown, listignored).iteritems():
553 for fn, st in self.walk(match, listunknown, listignored).iteritems():
554 if fn not in dmap:
554 if fn not in dmap:
555 if (listignored or match.exact(fn)) and self._dirignore(fn):
555 if (listignored or match.exact(fn)) and self._dirignore(fn):
556 if listignored:
556 if listignored:
557 iadd(fn)
557 iadd(fn)
558 elif listunknown:
558 elif listunknown:
559 uadd(fn)
559 uadd(fn)
560 continue
560 continue
561
561
562 state, mode, size, time = dmap[fn]
562 state, mode, size, time = dmap[fn]
563
563
564 if not st and state in "nma":
564 if not st and state in "nma":
565 dadd(fn)
565 dadd(fn)
566 elif state == 'n':
566 elif state == 'n':
567 if (size >= 0 and
567 if (size >= 0 and
568 (size != st.st_size
568 (size != st.st_size
569 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
569 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
570 or size == -2
570 or size == -2
571 or fn in self._copymap):
571 or fn in self._copymap):
572 madd(fn)
572 madd(fn)
573 elif time != int(st.st_mtime):
573 elif time != int(st.st_mtime):
574 ladd(fn)
574 ladd(fn)
575 elif listclean:
575 elif listclean:
576 cadd(fn)
576 cadd(fn)
577 elif state == 'm':
577 elif state == 'm':
578 madd(fn)
578 madd(fn)
579 elif state == 'a':
579 elif state == 'a':
580 aadd(fn)
580 aadd(fn)
581 elif state == 'r':
581 elif state == 'r':
582 radd(fn)
582 radd(fn)
583
583
584 return (lookup, modified, added, removed, deleted, unknown, ignored,
584 return (lookup, modified, added, removed, deleted, unknown, ignored,
585 clean)
585 clean)
@@ -1,2174 +1,2174 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store, encoding
12 import lock, transaction, stat, errno, ui, store, encoding
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 from lock import release
17 from lock import release
18
18
19 class localrepository(repo.repository):
19 class localrepository(repo.repository):
20 capabilities = set(('lookup', 'changegroupsubset'))
20 capabilities = set(('lookup', 'changegroupsubset'))
21 supported = ('revlogv1', 'store', 'fncache')
21 supported = ('revlogv1', 'store', 'fncache')
22
22
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 self.root = os.path.realpath(path)
25 self.root = os.path.realpath(path)
26 self.path = os.path.join(self.root, ".hg")
26 self.path = os.path.join(self.root, ".hg")
27 self.origroot = path
27 self.origroot = path
28 self.opener = util.opener(self.path)
28 self.opener = util.opener(self.path)
29 self.wopener = util.opener(self.root)
29 self.wopener = util.opener(self.root)
30
30
31 if not os.path.isdir(self.path):
31 if not os.path.isdir(self.path):
32 if create:
32 if create:
33 if not os.path.exists(path):
33 if not os.path.exists(path):
34 os.mkdir(path)
34 os.mkdir(path)
35 os.mkdir(self.path)
35 os.mkdir(self.path)
36 requirements = ["revlogv1"]
36 requirements = ["revlogv1"]
37 if parentui.configbool('format', 'usestore', True):
37 if parentui.configbool('format', 'usestore', True):
38 os.mkdir(os.path.join(self.path, "store"))
38 os.mkdir(os.path.join(self.path, "store"))
39 requirements.append("store")
39 requirements.append("store")
40 if parentui.configbool('format', 'usefncache', True):
40 if parentui.configbool('format', 'usefncache', True):
41 requirements.append("fncache")
41 requirements.append("fncache")
42 # create an invalid changelog
42 # create an invalid changelog
43 self.opener("00changelog.i", "a").write(
43 self.opener("00changelog.i", "a").write(
44 '\0\0\0\2' # represents revlogv2
44 '\0\0\0\2' # represents revlogv2
45 ' dummy changelog to prevent using the old repo layout'
45 ' dummy changelog to prevent using the old repo layout'
46 )
46 )
47 reqfile = self.opener("requires", "w")
47 reqfile = self.opener("requires", "w")
48 for r in requirements:
48 for r in requirements:
49 reqfile.write("%s\n" % r)
49 reqfile.write("%s\n" % r)
50 reqfile.close()
50 reqfile.close()
51 else:
51 else:
52 raise error.RepoError(_("repository %s not found") % path)
52 raise error.RepoError(_("repository %s not found") % path)
53 elif create:
53 elif create:
54 raise error.RepoError(_("repository %s already exists") % path)
54 raise error.RepoError(_("repository %s already exists") % path)
55 else:
55 else:
56 # find requirements
56 # find requirements
57 requirements = []
57 requirements = []
58 try:
58 try:
59 requirements = self.opener("requires").read().splitlines()
59 requirements = self.opener("requires").read().splitlines()
60 for r in requirements:
60 for r in requirements:
61 if r not in self.supported:
61 if r not in self.supported:
62 raise error.RepoError(_("requirement '%s' not supported") % r)
62 raise error.RepoError(_("requirement '%s' not supported") % r)
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.ui = ui.ui(parentui=parentui)
73 self.ui = ui.ui(parentui=parentui)
74 try:
74 try:
75 self.ui.readconfig(self.join("hgrc"), self.root)
75 self.ui.readconfig(self.join("hgrc"), self.root)
76 extensions.loadall(self.ui)
76 extensions.loadall(self.ui)
77 except IOError:
77 except IOError:
78 pass
78 pass
79
79
80 self.tagscache = None
80 self.tagscache = None
81 self._tagstypecache = None
81 self._tagstypecache = None
82 self.branchcache = None
82 self.branchcache = None
83 self._ubranchcache = None # UTF-8 version of branchcache
83 self._ubranchcache = None # UTF-8 version of branchcache
84 self._branchcachetip = None
84 self._branchcachetip = None
85 self.nodetagscache = None
85 self.nodetagscache = None
86 self.filterpats = {}
86 self.filterpats = {}
87 self._datafilters = {}
87 self._datafilters = {}
88 self._transref = self._lockref = self._wlockref = None
88 self._transref = self._lockref = self._wlockref = None
89
89
90 def __getattr__(self, name):
90 def __getattr__(self, name):
91 if name == 'changelog':
91 if name == 'changelog':
92 self.changelog = changelog.changelog(self.sopener)
92 self.changelog = changelog.changelog(self.sopener)
93 if 'HG_PENDING' in os.environ:
93 if 'HG_PENDING' in os.environ:
94 p = os.environ['HG_PENDING']
94 p = os.environ['HG_PENDING']
95 if p.startswith(self.root):
95 if p.startswith(self.root):
96 self.changelog.readpending('00changelog.i.a')
96 self.changelog.readpending('00changelog.i.a')
97 self.sopener.defversion = self.changelog.version
97 self.sopener.defversion = self.changelog.version
98 return self.changelog
98 return self.changelog
99 if name == 'manifest':
99 if name == 'manifest':
100 self.changelog
100 self.changelog
101 self.manifest = manifest.manifest(self.sopener)
101 self.manifest = manifest.manifest(self.sopener)
102 return self.manifest
102 return self.manifest
103 if name == 'dirstate':
103 if name == 'dirstate':
104 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
104 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 return self.dirstate
105 return self.dirstate
106 else:
106 else:
107 raise AttributeError(name)
107 raise AttributeError(name)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid == None:
110 if changeid == None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
    def hook(self, name, throw=False, **args):
        # Delegate to the generic hook machinery, passing this repo as
        # the hook's repository argument.
        return hook.hook(self.ui, self, name, throw, **args)
129
129
    # Characters that may never appear in a tag name (checked in _tag).
    tag_disallowed = ':\r\n'
131
131
    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        # Low-level tagging helper used by tag().  names may be a single
        # string or a sequence of tag names; local selects the
        # non-versioned 'localtags' file instead of committing .hgtags.
        # When parent is given, .hgtags is rewritten from that revision
        # instead of from the working directory.
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        # give pretag hooks a chance to veto (throw=True aborts)
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # Append '<hex node> <tag>' lines to fp; when a tag already
            # exists, first re-record its old node so history is kept.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # open for append-after-read; fall back to plain append if
            # the file does not exist yet
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # tagging an older revision: start from that revision's
            # .hgtags content (if any) and rewrite the file
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except error.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
205
205
206 def tag(self, names, node, message, local, user, date):
206 def tag(self, names, node, message, local, user, date):
207 '''tag a revision with one or more symbolic names.
207 '''tag a revision with one or more symbolic names.
208
208
209 names is a list of strings or, when adding a single tag, names may be a
209 names is a list of strings or, when adding a single tag, names may be a
210 string.
210 string.
211
211
212 if local is True, the tags are stored in a per-repository file.
212 if local is True, the tags are stored in a per-repository file.
213 otherwise, they are stored in the .hgtags file, and a new
213 otherwise, they are stored in the .hgtags file, and a new
214 changeset is committed with the change.
214 changeset is committed with the change.
215
215
216 keyword arguments:
216 keyword arguments:
217
217
218 local: whether to store tags in non-version-controlled file
218 local: whether to store tags in non-version-controlled file
219 (default False)
219 (default False)
220
220
221 message: commit message to use if committing
221 message: commit message to use if committing
222
222
223 user: name of user to use if committing
223 user: name of user to use if committing
224
224
225 date: date tuple to use if committing'''
225 date: date tuple to use if committing'''
226
226
227 for x in self.status()[:5]:
227 for x in self.status()[:5]:
228 if '.hgtags' in x:
228 if '.hgtags' in x:
229 raise util.Abort(_('working copy of .hgtags is changed '
229 raise util.Abort(_('working copy of .hgtags is changed '
230 '(please commit .hgtags manually)'))
230 '(please commit .hgtags manually)'))
231
231
232 self.tags() # instantiate the cache
232 self.tags() # instantiate the cache
233 self._tag(names, node, message, local, user, date)
233 self._tag(names, node, message, local, user, date)
234
234
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        # tag name -> (node, [superseded nodes]) across all files read
        globaltags = {}
        # tag name -> 'global' or 'local'
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # Parse one .hgtags (or localtags) file and merge it into
            # globaltags/tagtypes.  Each line is '<hex node> <tag>'.
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # within one file, later entries for a tag supersede
                # earlier ones; h accumulates the superseded nodes
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            # reuse the previous filectx when possible to share parsing
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            # no localtags file; nothing to merge
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            # a tag pointing at nullid has been deleted; keep its type
            # but drop it from the name -> node map
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
318
318
319 def tagtype(self, tagname):
319 def tagtype(self, tagname):
320 '''
320 '''
321 return the type of the given tag. result can be:
321 return the type of the given tag. result can be:
322
322
323 'local' : a local tag
323 'local' : a local tag
324 'global' : a global tag
324 'global' : a global tag
325 None : tag does not exist
325 None : tag does not exist
326 '''
326 '''
327
327
328 self.tags()
328 self.tags()
329
329
330 return self._tagstypecache.get(tagname)
330 return self._tagstypecache.get(tagname)
331
331
    def _hgtagsnodes(self):
        # Return (rev, node, fnode) triples for the .hgtags file of
        # every head, oldest head first, keeping only the last head
        # that carries each distinct .hgtags file revision.
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # an earlier head had the same .hgtags revision;
                # drop the duplicate (slot is filtered out below)
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
349
349
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                # NOTE(review): bare except — presumably guards against
                # nodes unknown to the changelog; a narrower exception
                # type would be safer.  TODO confirm what rev() raises.
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]
360
360
361 def nodetags(self, node):
361 def nodetags(self, node):
362 '''return the tags associated with a node'''
362 '''return the tags associated with a node'''
363 if not self.nodetagscache:
363 if not self.nodetagscache:
364 self.nodetagscache = {}
364 self.nodetagscache = {}
365 for t, n in self.tags().iteritems():
365 for t, n in self.tags().iteritems():
366 self.nodetagscache.setdefault(n, []).append(t)
366 self.nodetagscache.setdefault(n, []).append(t)
367 return self.nodetagscache.get(node, [])
367 return self.nodetagscache.get(node, [])
368
368
369 def _branchtags(self, partial, lrev):
369 def _branchtags(self, partial, lrev):
370 # TODO: rename this function?
370 # TODO: rename this function?
371 tiprev = len(self) - 1
371 tiprev = len(self) - 1
372 if lrev != tiprev:
372 if lrev != tiprev:
373 self._updatebranchcache(partial, lrev+1, tiprev+1)
373 self._updatebranchcache(partial, lrev+1, tiprev+1)
374 self._writebranchcache(partial, self.changelog.tip(), tiprev)
374 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375
375
376 return partial
376 return partial
377
377
    def _branchheads(self):
        # Return (and cache) a mapping of branch name -> list of all
        # head nodes on that branch, reusing the on-disk cache or
        # updating incrementally when the tip has merely advanced.
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # first call, or old tip was stripped: rebuild from the
            # on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # tip moved forward: update incrementally from the old tip
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache
404
404
405
405
406 def branchtags(self):
406 def branchtags(self):
407 '''return a dict where branch names map to the tipmost head of
407 '''return a dict where branch names map to the tipmost head of
408 the branch, open heads come before closed'''
408 the branch, open heads come before closed'''
409 bt = {}
409 bt = {}
410 for bn, heads in self._branchheads().iteritems():
410 for bn, heads in self._branchheads().iteritems():
411 head = None
411 head = None
412 for i in range(len(heads)-1, -1, -1):
412 for i in range(len(heads)-1, -1, -1):
413 h = heads[i]
413 h = heads[i]
414 if 'close' not in self.changelog.read(h)[5]:
414 if 'close' not in self.changelog.read(h)[5]:
415 head = h
415 head = h
416 break
416 break
417 # no open heads were found
417 # no open heads were found
418 if head is None:
418 if head is None:
419 head = heads[-1]
419 head = heads[-1]
420 bt[bn] = head
420 bt[bn] = head
421 return bt
421 return bt
422
422
423
423
    def _readbranchcache(self):
        # Load the on-disk branch head cache.  Returns (partial, last,
        # lrev): partial maps branch name -> list of head nodes, and
        # last/lrev identify the tip node/rev the cache was valid for.
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # missing/unreadable cache: start empty
            return {}, nullid, nullrev

        try:
            # first line: '<hex tip node> <tip rev>'
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: '<hex head node> <branch name>'
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal; rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
450
450
451 def _writebranchcache(self, branches, tip, tiprev):
451 def _writebranchcache(self, branches, tip, tiprev):
452 try:
452 try:
453 f = self.opener("branchheads.cache", "w", atomictemp=True)
453 f = self.opener("branchheads.cache", "w", atomictemp=True)
454 f.write("%s %s\n" % (hex(tip), tiprev))
454 f.write("%s %s\n" % (hex(tip), tiprev))
455 for label, nodes in branches.iteritems():
455 for label, nodes in branches.iteritems():
456 for node in nodes:
456 for node in nodes:
457 f.write("%s %s\n" % (hex(node), label))
457 f.write("%s %s\n" % (hex(node), label))
458 f.rename()
458 f.rename()
459 except (IOError, OSError):
459 except (IOError, OSError):
460 pass
460 pass
461
461
462 def _updatebranchcache(self, partial, start, end):
462 def _updatebranchcache(self, partial, start, end):
463 for r in xrange(start, end):
463 for r in xrange(start, end):
464 c = self[r]
464 c = self[r]
465 b = c.branch()
465 b = c.branch()
466 bheads = partial.setdefault(b, [])
466 bheads = partial.setdefault(b, [])
467 bheads.append(c.node())
467 bheads.append(c.node())
468 for p in c.parents():
468 for p in c.parents():
469 pn = p.node()
469 pn = p.node()
470 if pn in bheads:
470 if pn in bheads:
471 bheads.remove(pn)
471 bheads.remove(pn)
472
472
    def lookup(self, key):
        """Resolve *key* to a binary changelog node.

        key may be a revision number, '.', 'null', 'tip', a full or
        partial hex node, a tag, or a branch name — tried in that
        order.  Raises error.RepoError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # a 20-byte key is a binary node; show it as hex in the
            # error message below
            if len(key) == 20:
                key = hex(key)
        except:
            # NOTE(review): bare except — presumably key may not
            # support len(); any failure just leaves key unchanged
            # for the error message.
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)
498
498
499 def local(self):
499 def local(self):
500 return True
500 return True
501
501
502 def join(self, f):
502 def join(self, f):
503 return os.path.join(self.path, f)
503 return os.path.join(self.path, f)
504
504
505 def wjoin(self, f):
505 def wjoin(self, f):
506 return os.path.join(self.root, f)
506 return os.path.join(self.root, f)
507
507
    def rjoin(self, f):
        # join f onto the root; pconvert presumably normalizes
        # OS-specific path separators first — confirm in util.pconvert
        return os.path.join(self.root, util.pconvert(f))
510
510
    def file(self, f):
        """Return the filelog for tracked file *f*.

        A single leading '/' is stripped, so 'foo' and '/foo' name the
        same filelog.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
515
515
516 def changectx(self, changeid):
516 def changectx(self, changeid):
517 return self[changeid]
517 return self[changeid]
518
518
519 def parents(self, changeid=None):
519 def parents(self, changeid=None):
520 '''get list of changectxs for parents of changeid'''
520 '''get list of changectxs for parents of changeid'''
521 return self[changeid].parents()
521 return self[changeid].parents()
522
522
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # delegate to context.filectx, which resolves the identifiers
        return context.filectx(self, path, changeid, fileid)
527
527
    def getcwd(self):
        # current working directory, repo-relative; delegates to dirstate
        return self.dirstate.getcwd()
530
530
    def pathto(self, f, cwd=None):
        # render repo-relative path f relative to cwd; delegates to dirstate
        return self.dirstate.pathto(f, cwd)
533
533
    def wfile(self, f, mode='r'):
        # open working-directory file f through the working opener
        return self.wopener(f, mode)
536
536
537 def _link(self, f):
537 def _link(self, f):
538 return os.path.islink(self.wjoin(f))
538 return os.path.islink(self.wjoin(f))
539
539
540 def _filter(self, filter, filename, data):
540 def _filter(self, filter, filename, data):
541 if filter not in self.filterpats:
541 if filter not in self.filterpats:
542 l = []
542 l = []
543 for pat, cmd in self.ui.configitems(filter):
543 for pat, cmd in self.ui.configitems(filter):
544 if cmd == '!':
544 if cmd == '!':
545 continue
545 continue
546 mf = util.matcher(self.root, "", [pat], [], [])[1]
546 mf = util.matcher(self.root, "", [pat], [], [])[1]
547 fn = None
547 fn = None
548 params = cmd
548 params = cmd
549 for name, filterfn in self._datafilters.iteritems():
549 for name, filterfn in self._datafilters.iteritems():
550 if cmd.startswith(name):
550 if cmd.startswith(name):
551 fn = filterfn
551 fn = filterfn
552 params = cmd[len(name):].lstrip()
552 params = cmd[len(name):].lstrip()
553 break
553 break
554 if not fn:
554 if not fn:
555 fn = lambda s, c, **kwargs: util.filter(s, c)
555 fn = lambda s, c, **kwargs: util.filter(s, c)
556 # Wrap old filters not supporting keyword arguments
556 # Wrap old filters not supporting keyword arguments
557 if not inspect.getargspec(fn)[2]:
557 if not inspect.getargspec(fn)[2]:
558 oldfn = fn
558 oldfn = fn
559 fn = lambda s, c, **kwargs: oldfn(s, c)
559 fn = lambda s, c, **kwargs: oldfn(s, c)
560 l.append((mf, fn, params))
560 l.append((mf, fn, params))
561 self.filterpats[filter] = l
561 self.filterpats[filter] = l
562
562
563 for mf, fn, cmd in self.filterpats[filter]:
563 for mf, fn, cmd in self.filterpats[filter]:
564 if mf(filename):
564 if mf(filename):
565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
567 break
567 break
568
568
569 return data
569 return data
570
570
571 def adddatafilter(self, name, filter):
571 def adddatafilter(self, name, filter):
572 self._datafilters[name] = filter
572 self._datafilters[name] = filter
573
573
574 def wread(self, filename):
574 def wread(self, filename):
575 if self._link(filename):
575 if self._link(filename):
576 data = os.readlink(self.wjoin(filename))
576 data = os.readlink(self.wjoin(filename))
577 else:
577 else:
578 data = self.wopener(filename, 'r').read()
578 data = self.wopener(filename, 'r').read()
579 return self._filter("encode", filename, data)
579 return self._filter("encode", filename, data)
580
580
    def wwrite(self, filename, data, flags):
        """Write *data* to working-directory file *filename*, applying
        the 'decode' filters and honoring the 'l' (symlink) and 'x'
        (executable) flags."""
        data = self._filter("decode", filename, data)
        # remove any existing file/symlink first so a regular write
        # does not follow an old symlink
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            # file did not exist; nothing to remove
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
593
593
594 def wwritedata(self, filename, data):
594 def wwritedata(self, filename, data):
595 return self._filter("decode", filename, data)
595 return self._filter("decode", filename, data)
596
596
    def transaction(self):
        # Return a nested handle on the running transaction, or start a
        # new one.
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # when the transaction closes, the journal.* files are renamed
        # to undo.*, enabling a later rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref: the caller, not the repo, keeps the transaction alive
        self._transref = weakref.ref(tr)
        return tr
623
623
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                # caches may reference rolled-back data; drop them
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
637
637
    def rollback(self):
        """Undo the last transaction, restoring the saved dirstate and
        branch from the undo.* files."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch is missing; keep the current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # caches may reference rolled-back revisions; drop them
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
660
660
661 def invalidate(self):
661 def invalidate(self):
662 for a in "changelog manifest".split():
662 for a in "changelog manifest".split():
663 if a in self.__dict__:
663 if a in self.__dict__:
664 delattr(self, a)
664 delattr(self, a)
665 self.tagscache = None
665 self.tagscache = None
666 self._tagstypecache = None
666 self._tagstypecache = None
667 self.nodetagscache = None
667 self.nodetagscache = None
668 self.branchcache = None
668 self.branchcache = None
669 self._ubranchcache = None
669 self._ubranchcache = None
670 self._branchcachetip = None
670 self._branchcachetip = None
671
671
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Try a non-blocking acquire first; if the lock is held and
        # wait is True, retry with the configured timeout.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
686
686
    def lock(self, wait=True):
        """Acquire (or re-enter) the store lock; returns the lock object."""
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # already held by us; bump the recursion count
            l.lock()
            return l

        # invalidate caches on acquire: another process may have
        # written to the store since we last read it
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
697
697
698 def wlock(self, wait=True):
698 def wlock(self, wait=True):
699 l = self._wlockref and self._wlockref()
699 l = self._wlockref and self._wlockref()
700 if l is not None and l.held:
700 if l is not None and l.held:
701 l.lock()
701 l.lock()
702 return l
702 return l
703
703
704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
705 self.dirstate.invalidate, _('working directory of %s') %
705 self.dirstate.invalidate, _('working directory of %s') %
706 self.origroot)
706 self.origroot)
707 self._wlockref = weakref.ref(l)
707 self._wlockref = weakref.ref(l)
708 return l
708 return l
709
709
710 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
710 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
711 """
711 """
712 commit an individual file as part of a larger transaction
712 commit an individual file as part of a larger transaction
713 """
713 """
714
714
715 fn = fctx.path()
715 fn = fctx.path()
716 t = fctx.data()
716 t = fctx.data()
717 fl = self.file(fn)
717 fl = self.file(fn)
718 fp1 = manifest1.get(fn, nullid)
718 fp1 = manifest1.get(fn, nullid)
719 fp2 = manifest2.get(fn, nullid)
719 fp2 = manifest2.get(fn, nullid)
720
720
721 meta = {}
721 meta = {}
722 cp = fctx.renamed()
722 cp = fctx.renamed()
723 if cp and cp[0] != fn:
723 if cp and cp[0] != fn:
724 # Mark the new revision of this file as a copy of another
724 # Mark the new revision of this file as a copy of another
725 # file. This copy data will effectively act as a parent
725 # file. This copy data will effectively act as a parent
726 # of this new revision. If this is a merge, the first
726 # of this new revision. If this is a merge, the first
727 # parent will be the nullid (meaning "look up the copy data")
727 # parent will be the nullid (meaning "look up the copy data")
728 # and the second one will be the other parent. For example:
728 # and the second one will be the other parent. For example:
729 #
729 #
730 # 0 --- 1 --- 3 rev1 changes file foo
730 # 0 --- 1 --- 3 rev1 changes file foo
731 # \ / rev2 renames foo to bar and changes it
731 # \ / rev2 renames foo to bar and changes it
732 # \- 2 -/ rev3 should have bar with all changes and
732 # \- 2 -/ rev3 should have bar with all changes and
733 # should record that bar descends from
733 # should record that bar descends from
734 # bar in rev2 and foo in rev1
734 # bar in rev2 and foo in rev1
735 #
735 #
736 # this allows this merge to succeed:
736 # this allows this merge to succeed:
737 #
737 #
738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
739 # \ / merging rev3 and rev4 should use bar@rev2
739 # \ / merging rev3 and rev4 should use bar@rev2
740 # \- 2 --- 4 as the merge base
740 # \- 2 --- 4 as the merge base
741 #
741 #
742
742
743 cf = cp[0]
743 cf = cp[0]
744 cr = manifest1.get(cf)
744 cr = manifest1.get(cf)
745 nfp = fp2
745 nfp = fp2
746
746
747 if manifest2: # branch merge
747 if manifest2: # branch merge
748 if fp2 == nullid or cr is None: # copied on remote side
748 if fp2 == nullid or cr is None: # copied on remote side
749 if cf in manifest2:
749 if cf in manifest2:
750 cr = manifest2[cf]
750 cr = manifest2[cf]
751 nfp = fp1
751 nfp = fp1
752
752
753 # find source in nearest ancestor if we've lost track
753 # find source in nearest ancestor if we've lost track
754 if not cr:
754 if not cr:
755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
756 (fn, cf))
756 (fn, cf))
757 for a in self['.'].ancestors():
757 for a in self['.'].ancestors():
758 if cf in a:
758 if cf in a:
759 cr = a[cf].filenode()
759 cr = a[cf].filenode()
760 break
760 break
761
761
762 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
762 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
763 meta["copy"] = cf
763 meta["copy"] = cf
764 meta["copyrev"] = hex(cr)
764 meta["copyrev"] = hex(cr)
765 fp1, fp2 = nullid, nfp
765 fp1, fp2 = nullid, nfp
766 elif fp2 != nullid:
766 elif fp2 != nullid:
767 # is one parent an ancestor of the other?
767 # is one parent an ancestor of the other?
768 fpa = fl.ancestor(fp1, fp2)
768 fpa = fl.ancestor(fp1, fp2)
769 if fpa == fp1:
769 if fpa == fp1:
770 fp1, fp2 = fp2, nullid
770 fp1, fp2 = fp2, nullid
771 elif fpa == fp2:
771 elif fpa == fp2:
772 fp2 = nullid
772 fp2 = nullid
773
773
774 # is the file unmodified from the parent? report existing entry
774 # is the file unmodified from the parent? report existing entry
775 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
775 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
776 return fp1
776 return fp1
777
777
778 changelist.append(fn)
778 changelist.append(fn)
779 return fl.add(t, meta, tr, linkrev, fp1, fp2)
779 return fl.add(t, meta, tr, linkrev, fp1, fp2)
780
780
781 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
781 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
782 if p1 is None:
782 if p1 is None:
783 p1, p2 = self.dirstate.parents()
783 p1, p2 = self.dirstate.parents()
784 return self.commit(files=files, text=text, user=user, date=date,
784 return self.commit(files=files, text=text, user=user, date=date,
785 p1=p1, p2=p2, extra=extra, empty_ok=True)
785 p1=p1, p2=p2, extra=extra, empty_ok=True)
786
786
787 def commit(self, files=None, text="", user=None, date=None,
787 def commit(self, files=None, text="", user=None, date=None,
788 match=None, force=False, force_editor=False,
788 match=None, force=False, force_editor=False,
789 p1=None, p2=None, extra={}, empty_ok=False):
789 p1=None, p2=None, extra={}, empty_ok=False):
790 wlock = lock = None
790 wlock = lock = None
791 if extra.get("close"):
791 if extra.get("close"):
792 force = True
792 force = True
793 if files:
793 if files:
794 files = util.unique(files)
794 files = list(set(files))
795 try:
795 try:
796 wlock = self.wlock()
796 wlock = self.wlock()
797 lock = self.lock()
797 lock = self.lock()
798 use_dirstate = (p1 is None) # not rawcommit
798 use_dirstate = (p1 is None) # not rawcommit
799
799
800 if use_dirstate:
800 if use_dirstate:
801 p1, p2 = self.dirstate.parents()
801 p1, p2 = self.dirstate.parents()
802 update_dirstate = True
802 update_dirstate = True
803
803
804 if (not force and p2 != nullid and
804 if (not force and p2 != nullid and
805 (match and (match.files() or match.anypats()))):
805 (match and (match.files() or match.anypats()))):
806 raise util.Abort(_('cannot partially commit a merge '
806 raise util.Abort(_('cannot partially commit a merge '
807 '(do not specify files or patterns)'))
807 '(do not specify files or patterns)'))
808
808
809 if files:
809 if files:
810 modified, removed = [], []
810 modified, removed = [], []
811 for f in files:
811 for f in files:
812 s = self.dirstate[f]
812 s = self.dirstate[f]
813 if s in 'nma':
813 if s in 'nma':
814 modified.append(f)
814 modified.append(f)
815 elif s == 'r':
815 elif s == 'r':
816 removed.append(f)
816 removed.append(f)
817 else:
817 else:
818 self.ui.warn(_("%s not tracked!\n") % f)
818 self.ui.warn(_("%s not tracked!\n") % f)
819 changes = [modified, [], removed, [], []]
819 changes = [modified, [], removed, [], []]
820 else:
820 else:
821 changes = self.status(match=match)
821 changes = self.status(match=match)
822 else:
822 else:
823 p1, p2 = p1, p2 or nullid
823 p1, p2 = p1, p2 or nullid
824 update_dirstate = (self.dirstate.parents()[0] == p1)
824 update_dirstate = (self.dirstate.parents()[0] == p1)
825 changes = [files, [], [], [], []]
825 changes = [files, [], [], [], []]
826
826
827 ms = merge_.mergestate(self)
827 ms = merge_.mergestate(self)
828 for f in changes[0]:
828 for f in changes[0]:
829 if f in ms and ms[f] == 'u':
829 if f in ms and ms[f] == 'u':
830 raise util.Abort(_("unresolved merge conflicts "
830 raise util.Abort(_("unresolved merge conflicts "
831 "(see hg resolve)"))
831 "(see hg resolve)"))
832 wctx = context.workingctx(self, (p1, p2), text, user, date,
832 wctx = context.workingctx(self, (p1, p2), text, user, date,
833 extra, changes)
833 extra, changes)
834 r = self._commitctx(wctx, force, force_editor, empty_ok,
834 r = self._commitctx(wctx, force, force_editor, empty_ok,
835 use_dirstate, update_dirstate)
835 use_dirstate, update_dirstate)
836 ms.reset()
836 ms.reset()
837 return r
837 return r
838
838
839 finally:
839 finally:
840 release(lock, wlock)
840 release(lock, wlock)
841
841
842 def commitctx(self, ctx):
842 def commitctx(self, ctx):
843 """Add a new revision to current repository.
843 """Add a new revision to current repository.
844
844
845 Revision information is passed in the context.memctx argument.
845 Revision information is passed in the context.memctx argument.
846 commitctx() does not touch the working directory.
846 commitctx() does not touch the working directory.
847 """
847 """
848 wlock = lock = None
848 wlock = lock = None
849 try:
849 try:
850 wlock = self.wlock()
850 wlock = self.wlock()
851 lock = self.lock()
851 lock = self.lock()
852 return self._commitctx(ctx, force=True, force_editor=False,
852 return self._commitctx(ctx, force=True, force_editor=False,
853 empty_ok=True, use_dirstate=False,
853 empty_ok=True, use_dirstate=False,
854 update_dirstate=False)
854 update_dirstate=False)
855 finally:
855 finally:
856 release(lock, wlock)
856 release(lock, wlock)
857
857
858 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
858 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
859 use_dirstate=True, update_dirstate=True):
859 use_dirstate=True, update_dirstate=True):
860 tr = None
860 tr = None
861 valid = 0 # don't save the dirstate if this isn't set
861 valid = 0 # don't save the dirstate if this isn't set
862 try:
862 try:
863 commit = util.sort(wctx.modified() + wctx.added())
863 commit = util.sort(wctx.modified() + wctx.added())
864 remove = wctx.removed()
864 remove = wctx.removed()
865 extra = wctx.extra().copy()
865 extra = wctx.extra().copy()
866 branchname = extra['branch']
866 branchname = extra['branch']
867 user = wctx.user()
867 user = wctx.user()
868 text = wctx.description()
868 text = wctx.description()
869
869
870 p1, p2 = [p.node() for p in wctx.parents()]
870 p1, p2 = [p.node() for p in wctx.parents()]
871 c1 = self.changelog.read(p1)
871 c1 = self.changelog.read(p1)
872 c2 = self.changelog.read(p2)
872 c2 = self.changelog.read(p2)
873 m1 = self.manifest.read(c1[0]).copy()
873 m1 = self.manifest.read(c1[0]).copy()
874 m2 = self.manifest.read(c2[0])
874 m2 = self.manifest.read(c2[0])
875
875
876 if use_dirstate:
876 if use_dirstate:
877 oldname = c1[5].get("branch") # stored in UTF-8
877 oldname = c1[5].get("branch") # stored in UTF-8
878 if (not commit and not remove and not force and p2 == nullid
878 if (not commit and not remove and not force and p2 == nullid
879 and branchname == oldname):
879 and branchname == oldname):
880 self.ui.status(_("nothing changed\n"))
880 self.ui.status(_("nothing changed\n"))
881 return None
881 return None
882
882
883 xp1 = hex(p1)
883 xp1 = hex(p1)
884 if p2 == nullid: xp2 = ''
884 if p2 == nullid: xp2 = ''
885 else: xp2 = hex(p2)
885 else: xp2 = hex(p2)
886
886
887 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
887 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
888
888
889 tr = self.transaction()
889 tr = self.transaction()
890 trp = weakref.proxy(tr)
890 trp = weakref.proxy(tr)
891
891
892 # check in files
892 # check in files
893 new = {}
893 new = {}
894 changed = []
894 changed = []
895 linkrev = len(self)
895 linkrev = len(self)
896 for f in commit:
896 for f in commit:
897 self.ui.note(f + "\n")
897 self.ui.note(f + "\n")
898 try:
898 try:
899 fctx = wctx.filectx(f)
899 fctx = wctx.filectx(f)
900 newflags = fctx.flags()
900 newflags = fctx.flags()
901 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
901 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
902 if ((not changed or changed[-1] != f) and
902 if ((not changed or changed[-1] != f) and
903 m2.get(f) != new[f]):
903 m2.get(f) != new[f]):
904 # mention the file in the changelog if some
904 # mention the file in the changelog if some
905 # flag changed, even if there was no content
905 # flag changed, even if there was no content
906 # change.
906 # change.
907 if m1.flags(f) != newflags:
907 if m1.flags(f) != newflags:
908 changed.append(f)
908 changed.append(f)
909 m1.set(f, newflags)
909 m1.set(f, newflags)
910 if use_dirstate:
910 if use_dirstate:
911 self.dirstate.normal(f)
911 self.dirstate.normal(f)
912
912
913 except (OSError, IOError):
913 except (OSError, IOError):
914 if use_dirstate:
914 if use_dirstate:
915 self.ui.warn(_("trouble committing %s!\n") % f)
915 self.ui.warn(_("trouble committing %s!\n") % f)
916 raise
916 raise
917 else:
917 else:
918 remove.append(f)
918 remove.append(f)
919
919
920 updated, added = [], []
920 updated, added = [], []
921 for f in util.sort(changed):
921 for f in util.sort(changed):
922 if f in m1 or f in m2:
922 if f in m1 or f in m2:
923 updated.append(f)
923 updated.append(f)
924 else:
924 else:
925 added.append(f)
925 added.append(f)
926
926
927 # update manifest
927 # update manifest
928 m1.update(new)
928 m1.update(new)
929 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
929 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
930 removed1 = []
930 removed1 = []
931
931
932 for f in removed:
932 for f in removed:
933 if f in m1:
933 if f in m1:
934 del m1[f]
934 del m1[f]
935 removed1.append(f)
935 removed1.append(f)
936 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
936 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
937 (new, removed1))
937 (new, removed1))
938
938
939 # add changeset
939 # add changeset
940 if (not empty_ok and not text) or force_editor:
940 if (not empty_ok and not text) or force_editor:
941 edittext = []
941 edittext = []
942 if text:
942 if text:
943 edittext.append(text)
943 edittext.append(text)
944 edittext.append("")
944 edittext.append("")
945 edittext.append("") # Empty line between message and comments.
945 edittext.append("") # Empty line between message and comments.
946 edittext.append(_("HG: Enter commit message."
946 edittext.append(_("HG: Enter commit message."
947 " Lines beginning with 'HG:' are removed."))
947 " Lines beginning with 'HG:' are removed."))
948 edittext.append("HG: --")
948 edittext.append("HG: --")
949 edittext.append("HG: user: %s" % user)
949 edittext.append("HG: user: %s" % user)
950 if p2 != nullid:
950 if p2 != nullid:
951 edittext.append("HG: branch merge")
951 edittext.append("HG: branch merge")
952 if branchname:
952 if branchname:
953 edittext.append("HG: branch '%s'"
953 edittext.append("HG: branch '%s'"
954 % encoding.tolocal(branchname))
954 % encoding.tolocal(branchname))
955 edittext.extend(["HG: added %s" % f for f in added])
955 edittext.extend(["HG: added %s" % f for f in added])
956 edittext.extend(["HG: changed %s" % f for f in updated])
956 edittext.extend(["HG: changed %s" % f for f in updated])
957 edittext.extend(["HG: removed %s" % f for f in removed])
957 edittext.extend(["HG: removed %s" % f for f in removed])
958 if not added and not updated and not removed:
958 if not added and not updated and not removed:
959 edittext.append("HG: no files changed")
959 edittext.append("HG: no files changed")
960 edittext.append("")
960 edittext.append("")
961 # run editor in the repository root
961 # run editor in the repository root
962 olddir = os.getcwd()
962 olddir = os.getcwd()
963 os.chdir(self.root)
963 os.chdir(self.root)
964 text = self.ui.edit("\n".join(edittext), user)
964 text = self.ui.edit("\n".join(edittext), user)
965 os.chdir(olddir)
965 os.chdir(olddir)
966
966
967 lines = [line.rstrip() for line in text.rstrip().splitlines()]
967 lines = [line.rstrip() for line in text.rstrip().splitlines()]
968 while lines and not lines[0]:
968 while lines and not lines[0]:
969 del lines[0]
969 del lines[0]
970 if not lines and use_dirstate:
970 if not lines and use_dirstate:
971 raise util.Abort(_("empty commit message"))
971 raise util.Abort(_("empty commit message"))
972 text = '\n'.join(lines)
972 text = '\n'.join(lines)
973
973
974 self.changelog.delayupdate()
974 self.changelog.delayupdate()
975 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
975 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
976 user, wctx.date(), extra)
976 user, wctx.date(), extra)
977 p = lambda: self.changelog.writepending() and self.root or ""
977 p = lambda: self.changelog.writepending() and self.root or ""
978 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
978 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
979 parent2=xp2, pending=p)
979 parent2=xp2, pending=p)
980 self.changelog.finalize(trp)
980 self.changelog.finalize(trp)
981 tr.close()
981 tr.close()
982
982
983 if self.branchcache:
983 if self.branchcache:
984 self.branchtags()
984 self.branchtags()
985
985
986 if use_dirstate or update_dirstate:
986 if use_dirstate or update_dirstate:
987 self.dirstate.setparents(n)
987 self.dirstate.setparents(n)
988 if use_dirstate:
988 if use_dirstate:
989 for f in removed:
989 for f in removed:
990 self.dirstate.forget(f)
990 self.dirstate.forget(f)
991 valid = 1 # our dirstate updates are complete
991 valid = 1 # our dirstate updates are complete
992
992
993 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
993 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
994 return n
994 return n
995 finally:
995 finally:
996 if not valid: # don't save our updated dirstate
996 if not valid: # don't save our updated dirstate
997 self.dirstate.invalidate()
997 self.dirstate.invalidate()
998 del tr
998 del tr
999
999
1000 def walk(self, match, node=None):
1000 def walk(self, match, node=None):
1001 '''
1001 '''
1002 walk recursively through the directory tree or a given
1002 walk recursively through the directory tree or a given
1003 changeset, finding all files matched by the match
1003 changeset, finding all files matched by the match
1004 function
1004 function
1005 '''
1005 '''
1006 return self[node].walk(match)
1006 return self[node].walk(match)
1007
1007
1008 def status(self, node1='.', node2=None, match=None,
1008 def status(self, node1='.', node2=None, match=None,
1009 ignored=False, clean=False, unknown=False):
1009 ignored=False, clean=False, unknown=False):
1010 """return status of files between two nodes or node and working directory
1010 """return status of files between two nodes or node and working directory
1011
1011
1012 If node1 is None, use the first dirstate parent instead.
1012 If node1 is None, use the first dirstate parent instead.
1013 If node2 is None, compare node1 with working directory.
1013 If node2 is None, compare node1 with working directory.
1014 """
1014 """
1015
1015
1016 def mfmatches(ctx):
1016 def mfmatches(ctx):
1017 mf = ctx.manifest().copy()
1017 mf = ctx.manifest().copy()
1018 for fn in mf.keys():
1018 for fn in mf.keys():
1019 if not match(fn):
1019 if not match(fn):
1020 del mf[fn]
1020 del mf[fn]
1021 return mf
1021 return mf
1022
1022
1023 if isinstance(node1, context.changectx):
1023 if isinstance(node1, context.changectx):
1024 ctx1 = node1
1024 ctx1 = node1
1025 else:
1025 else:
1026 ctx1 = self[node1]
1026 ctx1 = self[node1]
1027 if isinstance(node2, context.changectx):
1027 if isinstance(node2, context.changectx):
1028 ctx2 = node2
1028 ctx2 = node2
1029 else:
1029 else:
1030 ctx2 = self[node2]
1030 ctx2 = self[node2]
1031
1031
1032 working = ctx2.rev() is None
1032 working = ctx2.rev() is None
1033 parentworking = working and ctx1 == self['.']
1033 parentworking = working and ctx1 == self['.']
1034 match = match or match_.always(self.root, self.getcwd())
1034 match = match or match_.always(self.root, self.getcwd())
1035 listignored, listclean, listunknown = ignored, clean, unknown
1035 listignored, listclean, listunknown = ignored, clean, unknown
1036
1036
1037 # load earliest manifest first for caching reasons
1037 # load earliest manifest first for caching reasons
1038 if not working and ctx2.rev() < ctx1.rev():
1038 if not working and ctx2.rev() < ctx1.rev():
1039 ctx2.manifest()
1039 ctx2.manifest()
1040
1040
1041 if not parentworking:
1041 if not parentworking:
1042 def bad(f, msg):
1042 def bad(f, msg):
1043 if f not in ctx1:
1043 if f not in ctx1:
1044 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1044 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1045 return False
1045 return False
1046 match.bad = bad
1046 match.bad = bad
1047
1047
1048 if working: # we need to scan the working dir
1048 if working: # we need to scan the working dir
1049 s = self.dirstate.status(match, listignored, listclean, listunknown)
1049 s = self.dirstate.status(match, listignored, listclean, listunknown)
1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1051
1051
1052 # check for any possibly clean files
1052 # check for any possibly clean files
1053 if parentworking and cmp:
1053 if parentworking and cmp:
1054 fixup = []
1054 fixup = []
1055 # do a full compare of any files that might have changed
1055 # do a full compare of any files that might have changed
1056 for f in cmp:
1056 for f in cmp:
1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1058 or ctx1[f].cmp(ctx2[f].data())):
1058 or ctx1[f].cmp(ctx2[f].data())):
1059 modified.append(f)
1059 modified.append(f)
1060 else:
1060 else:
1061 fixup.append(f)
1061 fixup.append(f)
1062
1062
1063 if listclean:
1063 if listclean:
1064 clean += fixup
1064 clean += fixup
1065
1065
1066 # update dirstate for files that are actually clean
1066 # update dirstate for files that are actually clean
1067 if fixup:
1067 if fixup:
1068 wlock = None
1068 wlock = None
1069 try:
1069 try:
1070 try:
1070 try:
1071 # updating the dirstate is optional
1071 # updating the dirstate is optional
1072 # so we dont wait on the lock
1072 # so we dont wait on the lock
1073 wlock = self.wlock(False)
1073 wlock = self.wlock(False)
1074 for f in fixup:
1074 for f in fixup:
1075 self.dirstate.normal(f)
1075 self.dirstate.normal(f)
1076 except error.LockError:
1076 except error.LockError:
1077 pass
1077 pass
1078 finally:
1078 finally:
1079 release(wlock)
1079 release(wlock)
1080
1080
1081 if not parentworking:
1081 if not parentworking:
1082 mf1 = mfmatches(ctx1)
1082 mf1 = mfmatches(ctx1)
1083 if working:
1083 if working:
1084 # we are comparing working dir against non-parent
1084 # we are comparing working dir against non-parent
1085 # generate a pseudo-manifest for the working dir
1085 # generate a pseudo-manifest for the working dir
1086 mf2 = mfmatches(self['.'])
1086 mf2 = mfmatches(self['.'])
1087 for f in cmp + modified + added:
1087 for f in cmp + modified + added:
1088 mf2[f] = None
1088 mf2[f] = None
1089 mf2.set(f, ctx2.flags(f))
1089 mf2.set(f, ctx2.flags(f))
1090 for f in removed:
1090 for f in removed:
1091 if f in mf2:
1091 if f in mf2:
1092 del mf2[f]
1092 del mf2[f]
1093 else:
1093 else:
1094 # we are comparing two revisions
1094 # we are comparing two revisions
1095 deleted, unknown, ignored = [], [], []
1095 deleted, unknown, ignored = [], [], []
1096 mf2 = mfmatches(ctx2)
1096 mf2 = mfmatches(ctx2)
1097
1097
1098 modified, added, clean = [], [], []
1098 modified, added, clean = [], [], []
1099 for fn in mf2:
1099 for fn in mf2:
1100 if fn in mf1:
1100 if fn in mf1:
1101 if (mf1.flags(fn) != mf2.flags(fn) or
1101 if (mf1.flags(fn) != mf2.flags(fn) or
1102 (mf1[fn] != mf2[fn] and
1102 (mf1[fn] != mf2[fn] and
1103 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1103 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1104 modified.append(fn)
1104 modified.append(fn)
1105 elif listclean:
1105 elif listclean:
1106 clean.append(fn)
1106 clean.append(fn)
1107 del mf1[fn]
1107 del mf1[fn]
1108 else:
1108 else:
1109 added.append(fn)
1109 added.append(fn)
1110 removed = mf1.keys()
1110 removed = mf1.keys()
1111
1111
1112 r = modified, added, removed, deleted, unknown, ignored, clean
1112 r = modified, added, removed, deleted, unknown, ignored, clean
1113 [l.sort() for l in r]
1113 [l.sort() for l in r]
1114 return r
1114 return r
1115
1115
1116 def add(self, list):
1116 def add(self, list):
1117 wlock = self.wlock()
1117 wlock = self.wlock()
1118 try:
1118 try:
1119 rejected = []
1119 rejected = []
1120 for f in list:
1120 for f in list:
1121 p = self.wjoin(f)
1121 p = self.wjoin(f)
1122 try:
1122 try:
1123 st = os.lstat(p)
1123 st = os.lstat(p)
1124 except:
1124 except:
1125 self.ui.warn(_("%s does not exist!\n") % f)
1125 self.ui.warn(_("%s does not exist!\n") % f)
1126 rejected.append(f)
1126 rejected.append(f)
1127 continue
1127 continue
1128 if st.st_size > 10000000:
1128 if st.st_size > 10000000:
1129 self.ui.warn(_("%s: files over 10MB may cause memory and"
1129 self.ui.warn(_("%s: files over 10MB may cause memory and"
1130 " performance problems\n"
1130 " performance problems\n"
1131 "(use 'hg revert %s' to unadd the file)\n")
1131 "(use 'hg revert %s' to unadd the file)\n")
1132 % (f, f))
1132 % (f, f))
1133 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1133 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1134 self.ui.warn(_("%s not added: only files and symlinks "
1134 self.ui.warn(_("%s not added: only files and symlinks "
1135 "supported currently\n") % f)
1135 "supported currently\n") % f)
1136 rejected.append(p)
1136 rejected.append(p)
1137 elif self.dirstate[f] in 'amn':
1137 elif self.dirstate[f] in 'amn':
1138 self.ui.warn(_("%s already tracked!\n") % f)
1138 self.ui.warn(_("%s already tracked!\n") % f)
1139 elif self.dirstate[f] == 'r':
1139 elif self.dirstate[f] == 'r':
1140 self.dirstate.normallookup(f)
1140 self.dirstate.normallookup(f)
1141 else:
1141 else:
1142 self.dirstate.add(f)
1142 self.dirstate.add(f)
1143 return rejected
1143 return rejected
1144 finally:
1144 finally:
1145 wlock.release()
1145 wlock.release()
1146
1146
1147 def forget(self, list):
1147 def forget(self, list):
1148 wlock = self.wlock()
1148 wlock = self.wlock()
1149 try:
1149 try:
1150 for f in list:
1150 for f in list:
1151 if self.dirstate[f] != 'a':
1151 if self.dirstate[f] != 'a':
1152 self.ui.warn(_("%s not added!\n") % f)
1152 self.ui.warn(_("%s not added!\n") % f)
1153 else:
1153 else:
1154 self.dirstate.forget(f)
1154 self.dirstate.forget(f)
1155 finally:
1155 finally:
1156 wlock.release()
1156 wlock.release()
1157
1157
1158 def remove(self, list, unlink=False):
1158 def remove(self, list, unlink=False):
1159 wlock = None
1159 wlock = None
1160 try:
1160 try:
1161 if unlink:
1161 if unlink:
1162 for f in list:
1162 for f in list:
1163 try:
1163 try:
1164 util.unlink(self.wjoin(f))
1164 util.unlink(self.wjoin(f))
1165 except OSError, inst:
1165 except OSError, inst:
1166 if inst.errno != errno.ENOENT:
1166 if inst.errno != errno.ENOENT:
1167 raise
1167 raise
1168 wlock = self.wlock()
1168 wlock = self.wlock()
1169 for f in list:
1169 for f in list:
1170 if unlink and os.path.exists(self.wjoin(f)):
1170 if unlink and os.path.exists(self.wjoin(f)):
1171 self.ui.warn(_("%s still exists!\n") % f)
1171 self.ui.warn(_("%s still exists!\n") % f)
1172 elif self.dirstate[f] == 'a':
1172 elif self.dirstate[f] == 'a':
1173 self.dirstate.forget(f)
1173 self.dirstate.forget(f)
1174 elif f not in self.dirstate:
1174 elif f not in self.dirstate:
1175 self.ui.warn(_("%s not tracked!\n") % f)
1175 self.ui.warn(_("%s not tracked!\n") % f)
1176 else:
1176 else:
1177 self.dirstate.remove(f)
1177 self.dirstate.remove(f)
1178 finally:
1178 finally:
1179 release(wlock)
1179 release(wlock)
1180
1180
1181 def undelete(self, list):
1181 def undelete(self, list):
1182 manifests = [self.manifest.read(self.changelog.read(p)[0])
1182 manifests = [self.manifest.read(self.changelog.read(p)[0])
1183 for p in self.dirstate.parents() if p != nullid]
1183 for p in self.dirstate.parents() if p != nullid]
1184 wlock = self.wlock()
1184 wlock = self.wlock()
1185 try:
1185 try:
1186 for f in list:
1186 for f in list:
1187 if self.dirstate[f] != 'r':
1187 if self.dirstate[f] != 'r':
1188 self.ui.warn(_("%s not removed!\n") % f)
1188 self.ui.warn(_("%s not removed!\n") % f)
1189 else:
1189 else:
1190 m = f in manifests[0] and manifests[0] or manifests[1]
1190 m = f in manifests[0] and manifests[0] or manifests[1]
1191 t = self.file(f).read(m[f])
1191 t = self.file(f).read(m[f])
1192 self.wwrite(f, t, m.flags(f))
1192 self.wwrite(f, t, m.flags(f))
1193 self.dirstate.normal(f)
1193 self.dirstate.normal(f)
1194 finally:
1194 finally:
1195 wlock.release()
1195 wlock.release()
1196
1196
1197 def copy(self, source, dest):
1197 def copy(self, source, dest):
1198 p = self.wjoin(dest)
1198 p = self.wjoin(dest)
1199 if not (os.path.exists(p) or os.path.islink(p)):
1199 if not (os.path.exists(p) or os.path.islink(p)):
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1203 "symbolic link\n") % dest)
1203 "symbolic link\n") % dest)
1204 else:
1204 else:
1205 wlock = self.wlock()
1205 wlock = self.wlock()
1206 try:
1206 try:
1207 if self.dirstate[dest] in '?r':
1207 if self.dirstate[dest] in '?r':
1208 self.dirstate.add(dest)
1208 self.dirstate.add(dest)
1209 self.dirstate.copy(source, dest)
1209 self.dirstate.copy(source, dest)
1210 finally:
1210 finally:
1211 wlock.release()
1211 wlock.release()
1212
1212
1213 def heads(self, start=None, closed=True):
1213 def heads(self, start=None, closed=True):
1214 heads = self.changelog.heads(start)
1214 heads = self.changelog.heads(start)
1215 def display(head):
1215 def display(head):
1216 if closed:
1216 if closed:
1217 return True
1217 return True
1218 extras = self.changelog.read(head)[5]
1218 extras = self.changelog.read(head)[5]
1219 return ('close' not in extras)
1219 return ('close' not in extras)
1220 # sort the output in rev descending order
1220 # sort the output in rev descending order
1221 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1221 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1222 return [n for (r, n) in util.sort(heads)]
1222 return [n for (r, n) in util.sort(heads)]
1223
1223
1224 def branchheads(self, branch=None, start=None, closed=True):
1224 def branchheads(self, branch=None, start=None, closed=True):
1225 if branch is None:
1225 if branch is None:
1226 branch = self[None].branch()
1226 branch = self[None].branch()
1227 branches = self._branchheads()
1227 branches = self._branchheads()
1228 if branch not in branches:
1228 if branch not in branches:
1229 return []
1229 return []
1230 bheads = branches[branch]
1230 bheads = branches[branch]
1231 # the cache returns heads ordered lowest to highest
1231 # the cache returns heads ordered lowest to highest
1232 bheads.reverse()
1232 bheads.reverse()
1233 if start is not None:
1233 if start is not None:
1234 # filter out the heads that cannot be reached from startrev
1234 # filter out the heads that cannot be reached from startrev
1235 bheads = self.changelog.nodesbetween([start], bheads)[2]
1235 bheads = self.changelog.nodesbetween([start], bheads)[2]
1236 if not closed:
1236 if not closed:
1237 bheads = [h for h in bheads if
1237 bheads = [h for h in bheads if
1238 ('close' not in self.changelog.read(h)[5])]
1238 ('close' not in self.changelog.read(h)[5])]
1239 return bheads
1239 return bheads
1240
1240
1241 def branches(self, nodes):
1241 def branches(self, nodes):
1242 if not nodes:
1242 if not nodes:
1243 nodes = [self.changelog.tip()]
1243 nodes = [self.changelog.tip()]
1244 b = []
1244 b = []
1245 for n in nodes:
1245 for n in nodes:
1246 t = n
1246 t = n
1247 while 1:
1247 while 1:
1248 p = self.changelog.parents(n)
1248 p = self.changelog.parents(n)
1249 if p[1] != nullid or p[0] == nullid:
1249 if p[1] != nullid or p[0] == nullid:
1250 b.append((t, n, p[0], p[1]))
1250 b.append((t, n, p[0], p[1]))
1251 break
1251 break
1252 n = p[0]
1252 n = p[0]
1253 return b
1253 return b
1254
1254
1255 def between(self, pairs):
1255 def between(self, pairs):
1256 r = []
1256 r = []
1257
1257
1258 for top, bottom in pairs:
1258 for top, bottom in pairs:
1259 n, l, i = top, [], 0
1259 n, l, i = top, [], 0
1260 f = 1
1260 f = 1
1261
1261
1262 while n != bottom and n != nullid:
1262 while n != bottom and n != nullid:
1263 p = self.changelog.parents(n)[0]
1263 p = self.changelog.parents(n)[0]
1264 if i == f:
1264 if i == f:
1265 l.append(n)
1265 l.append(n)
1266 f = f * 2
1266 f = f * 2
1267 n = p
1267 n = p
1268 i += 1
1268 i += 1
1269
1269
1270 r.append(l)
1270 r.append(l)
1271
1271
1272 return r
1272 return r
1273
1273
1274 def findincoming(self, remote, base=None, heads=None, force=False):
1274 def findincoming(self, remote, base=None, heads=None, force=False):
1275 """Return list of roots of the subsets of missing nodes from remote
1275 """Return list of roots of the subsets of missing nodes from remote
1276
1276
1277 If base dict is specified, assume that these nodes and their parents
1277 If base dict is specified, assume that these nodes and their parents
1278 exist on the remote side and that no child of a node of base exists
1278 exist on the remote side and that no child of a node of base exists
1279 in both remote and self.
1279 in both remote and self.
1280 Furthermore base will be updated to include the nodes that exists
1280 Furthermore base will be updated to include the nodes that exists
1281 in self and remote but no children exists in self and remote.
1281 in self and remote but no children exists in self and remote.
1282 If a list of heads is specified, return only nodes which are heads
1282 If a list of heads is specified, return only nodes which are heads
1283 or ancestors of these heads.
1283 or ancestors of these heads.
1284
1284
1285 All the ancestors of base are in self and in remote.
1285 All the ancestors of base are in self and in remote.
1286 All the descendants of the list returned are missing in self.
1286 All the descendants of the list returned are missing in self.
1287 (and so we know that the rest of the nodes are missing in remote, see
1287 (and so we know that the rest of the nodes are missing in remote, see
1288 outgoing)
1288 outgoing)
1289 """
1289 """
1290 return self.findcommonincoming(remote, base, heads, force)[1]
1290 return self.findcommonincoming(remote, base, heads, force)[1]
1291
1291
1292 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1292 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1293 """Return a tuple (common, missing roots, heads) used to identify
1293 """Return a tuple (common, missing roots, heads) used to identify
1294 missing nodes from remote.
1294 missing nodes from remote.
1295
1295
1296 If base dict is specified, assume that these nodes and their parents
1296 If base dict is specified, assume that these nodes and their parents
1297 exist on the remote side and that no child of a node of base exists
1297 exist on the remote side and that no child of a node of base exists
1298 in both remote and self.
1298 in both remote and self.
1299 Furthermore base will be updated to include the nodes that exists
1299 Furthermore base will be updated to include the nodes that exists
1300 in self and remote but no children exists in self and remote.
1300 in self and remote but no children exists in self and remote.
1301 If a list of heads is specified, return only nodes which are heads
1301 If a list of heads is specified, return only nodes which are heads
1302 or ancestors of these heads.
1302 or ancestors of these heads.
1303
1303
1304 All the ancestors of base are in self and in remote.
1304 All the ancestors of base are in self and in remote.
1305 """
1305 """
1306 m = self.changelog.nodemap
1306 m = self.changelog.nodemap
1307 search = []
1307 search = []
1308 fetch = {}
1308 fetch = {}
1309 seen = {}
1309 seen = {}
1310 seenbranch = {}
1310 seenbranch = {}
1311 if base == None:
1311 if base == None:
1312 base = {}
1312 base = {}
1313
1313
1314 if not heads:
1314 if not heads:
1315 heads = remote.heads()
1315 heads = remote.heads()
1316
1316
1317 if self.changelog.tip() == nullid:
1317 if self.changelog.tip() == nullid:
1318 base[nullid] = 1
1318 base[nullid] = 1
1319 if heads != [nullid]:
1319 if heads != [nullid]:
1320 return [nullid], [nullid], list(heads)
1320 return [nullid], [nullid], list(heads)
1321 return [nullid], [], []
1321 return [nullid], [], []
1322
1322
1323 # assume we're closer to the tip than the root
1323 # assume we're closer to the tip than the root
1324 # and start by examining the heads
1324 # and start by examining the heads
1325 self.ui.status(_("searching for changes\n"))
1325 self.ui.status(_("searching for changes\n"))
1326
1326
1327 unknown = []
1327 unknown = []
1328 for h in heads:
1328 for h in heads:
1329 if h not in m:
1329 if h not in m:
1330 unknown.append(h)
1330 unknown.append(h)
1331 else:
1331 else:
1332 base[h] = 1
1332 base[h] = 1
1333
1333
1334 heads = unknown
1334 heads = unknown
1335 if not unknown:
1335 if not unknown:
1336 return base.keys(), [], []
1336 return base.keys(), [], []
1337
1337
1338 req = dict.fromkeys(unknown)
1338 req = dict.fromkeys(unknown)
1339 reqcnt = 0
1339 reqcnt = 0
1340
1340
1341 # search through remote branches
1341 # search through remote branches
1342 # a 'branch' here is a linear segment of history, with four parts:
1342 # a 'branch' here is a linear segment of history, with four parts:
1343 # head, root, first parent, second parent
1343 # head, root, first parent, second parent
1344 # (a branch always has two parents (or none) by definition)
1344 # (a branch always has two parents (or none) by definition)
1345 unknown = remote.branches(unknown)
1345 unknown = remote.branches(unknown)
1346 while unknown:
1346 while unknown:
1347 r = []
1347 r = []
1348 while unknown:
1348 while unknown:
1349 n = unknown.pop(0)
1349 n = unknown.pop(0)
1350 if n[0] in seen:
1350 if n[0] in seen:
1351 continue
1351 continue
1352
1352
1353 self.ui.debug(_("examining %s:%s\n")
1353 self.ui.debug(_("examining %s:%s\n")
1354 % (short(n[0]), short(n[1])))
1354 % (short(n[0]), short(n[1])))
1355 if n[0] == nullid: # found the end of the branch
1355 if n[0] == nullid: # found the end of the branch
1356 pass
1356 pass
1357 elif n in seenbranch:
1357 elif n in seenbranch:
1358 self.ui.debug(_("branch already found\n"))
1358 self.ui.debug(_("branch already found\n"))
1359 continue
1359 continue
1360 elif n[1] and n[1] in m: # do we know the base?
1360 elif n[1] and n[1] in m: # do we know the base?
1361 self.ui.debug(_("found incomplete branch %s:%s\n")
1361 self.ui.debug(_("found incomplete branch %s:%s\n")
1362 % (short(n[0]), short(n[1])))
1362 % (short(n[0]), short(n[1])))
1363 search.append(n[0:2]) # schedule branch range for scanning
1363 search.append(n[0:2]) # schedule branch range for scanning
1364 seenbranch[n] = 1
1364 seenbranch[n] = 1
1365 else:
1365 else:
1366 if n[1] not in seen and n[1] not in fetch:
1366 if n[1] not in seen and n[1] not in fetch:
1367 if n[2] in m and n[3] in m:
1367 if n[2] in m and n[3] in m:
1368 self.ui.debug(_("found new changeset %s\n") %
1368 self.ui.debug(_("found new changeset %s\n") %
1369 short(n[1]))
1369 short(n[1]))
1370 fetch[n[1]] = 1 # earliest unknown
1370 fetch[n[1]] = 1 # earliest unknown
1371 for p in n[2:4]:
1371 for p in n[2:4]:
1372 if p in m:
1372 if p in m:
1373 base[p] = 1 # latest known
1373 base[p] = 1 # latest known
1374
1374
1375 for p in n[2:4]:
1375 for p in n[2:4]:
1376 if p not in req and p not in m:
1376 if p not in req and p not in m:
1377 r.append(p)
1377 r.append(p)
1378 req[p] = 1
1378 req[p] = 1
1379 seen[n[0]] = 1
1379 seen[n[0]] = 1
1380
1380
1381 if r:
1381 if r:
1382 reqcnt += 1
1382 reqcnt += 1
1383 self.ui.debug(_("request %d: %s\n") %
1383 self.ui.debug(_("request %d: %s\n") %
1384 (reqcnt, " ".join(map(short, r))))
1384 (reqcnt, " ".join(map(short, r))))
1385 for p in xrange(0, len(r), 10):
1385 for p in xrange(0, len(r), 10):
1386 for b in remote.branches(r[p:p+10]):
1386 for b in remote.branches(r[p:p+10]):
1387 self.ui.debug(_("received %s:%s\n") %
1387 self.ui.debug(_("received %s:%s\n") %
1388 (short(b[0]), short(b[1])))
1388 (short(b[0]), short(b[1])))
1389 unknown.append(b)
1389 unknown.append(b)
1390
1390
1391 # do binary search on the branches we found
1391 # do binary search on the branches we found
1392 while search:
1392 while search:
1393 newsearch = []
1393 newsearch = []
1394 reqcnt += 1
1394 reqcnt += 1
1395 for n, l in zip(search, remote.between(search)):
1395 for n, l in zip(search, remote.between(search)):
1396 l.append(n[1])
1396 l.append(n[1])
1397 p = n[0]
1397 p = n[0]
1398 f = 1
1398 f = 1
1399 for i in l:
1399 for i in l:
1400 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1400 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1401 if i in m:
1401 if i in m:
1402 if f <= 2:
1402 if f <= 2:
1403 self.ui.debug(_("found new branch changeset %s\n") %
1403 self.ui.debug(_("found new branch changeset %s\n") %
1404 short(p))
1404 short(p))
1405 fetch[p] = 1
1405 fetch[p] = 1
1406 base[i] = 1
1406 base[i] = 1
1407 else:
1407 else:
1408 self.ui.debug(_("narrowed branch search to %s:%s\n")
1408 self.ui.debug(_("narrowed branch search to %s:%s\n")
1409 % (short(p), short(i)))
1409 % (short(p), short(i)))
1410 newsearch.append((p, i))
1410 newsearch.append((p, i))
1411 break
1411 break
1412 p, f = i, f * 2
1412 p, f = i, f * 2
1413 search = newsearch
1413 search = newsearch
1414
1414
1415 # sanity check our fetch list
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1416 for f in fetch.keys():
1417 if f in m:
1417 if f in m:
1418 raise error.RepoError(_("already have changeset ")
1418 raise error.RepoError(_("already have changeset ")
1419 + short(f[:4]))
1419 + short(f[:4]))
1420
1420
1421 if base.keys() == [nullid]:
1421 if base.keys() == [nullid]:
1422 if force:
1422 if force:
1423 self.ui.warn(_("warning: repository is unrelated\n"))
1423 self.ui.warn(_("warning: repository is unrelated\n"))
1424 else:
1424 else:
1425 raise util.Abort(_("repository is unrelated"))
1425 raise util.Abort(_("repository is unrelated"))
1426
1426
1427 self.ui.debug(_("found new changesets starting at ") +
1427 self.ui.debug(_("found new changesets starting at ") +
1428 " ".join([short(f) for f in fetch]) + "\n")
1428 " ".join([short(f) for f in fetch]) + "\n")
1429
1429
1430 self.ui.debug(_("%d total queries\n") % reqcnt)
1430 self.ui.debug(_("%d total queries\n") % reqcnt)
1431
1431
1432 return base.keys(), fetch.keys(), heads
1432 return base.keys(), fetch.keys(), heads
1433
1433
1434 def findoutgoing(self, remote, base=None, heads=None, force=False):
1434 def findoutgoing(self, remote, base=None, heads=None, force=False):
1435 """Return list of nodes that are roots of subsets not in remote
1435 """Return list of nodes that are roots of subsets not in remote
1436
1436
1437 If base dict is specified, assume that these nodes and their parents
1437 If base dict is specified, assume that these nodes and their parents
1438 exist on the remote side.
1438 exist on the remote side.
1439 If a list of heads is specified, return only nodes which are heads
1439 If a list of heads is specified, return only nodes which are heads
1440 or ancestors of these heads, and return a second element which
1440 or ancestors of these heads, and return a second element which
1441 contains all remote heads which get new children.
1441 contains all remote heads which get new children.
1442 """
1442 """
1443 if base == None:
1443 if base == None:
1444 base = {}
1444 base = {}
1445 self.findincoming(remote, base, heads, force=force)
1445 self.findincoming(remote, base, heads, force=force)
1446
1446
1447 self.ui.debug(_("common changesets up to ")
1447 self.ui.debug(_("common changesets up to ")
1448 + " ".join(map(short, base.keys())) + "\n")
1448 + " ".join(map(short, base.keys())) + "\n")
1449
1449
1450 remain = dict.fromkeys(self.changelog.nodemap)
1450 remain = dict.fromkeys(self.changelog.nodemap)
1451
1451
1452 # prune everything remote has from the tree
1452 # prune everything remote has from the tree
1453 del remain[nullid]
1453 del remain[nullid]
1454 remove = base.keys()
1454 remove = base.keys()
1455 while remove:
1455 while remove:
1456 n = remove.pop(0)
1456 n = remove.pop(0)
1457 if n in remain:
1457 if n in remain:
1458 del remain[n]
1458 del remain[n]
1459 for p in self.changelog.parents(n):
1459 for p in self.changelog.parents(n):
1460 remove.append(p)
1460 remove.append(p)
1461
1461
1462 # find every node whose parents have been pruned
1462 # find every node whose parents have been pruned
1463 subset = []
1463 subset = []
1464 # find every remote head that will get new children
1464 # find every remote head that will get new children
1465 updated_heads = {}
1465 updated_heads = {}
1466 for n in remain:
1466 for n in remain:
1467 p1, p2 = self.changelog.parents(n)
1467 p1, p2 = self.changelog.parents(n)
1468 if p1 not in remain and p2 not in remain:
1468 if p1 not in remain and p2 not in remain:
1469 subset.append(n)
1469 subset.append(n)
1470 if heads:
1470 if heads:
1471 if p1 in heads:
1471 if p1 in heads:
1472 updated_heads[p1] = True
1472 updated_heads[p1] = True
1473 if p2 in heads:
1473 if p2 in heads:
1474 updated_heads[p2] = True
1474 updated_heads[p2] = True
1475
1475
1476 # this is the set of all roots we have to push
1476 # this is the set of all roots we have to push
1477 if heads:
1477 if heads:
1478 return subset, updated_heads.keys()
1478 return subset, updated_heads.keys()
1479 else:
1479 else:
1480 return subset
1480 return subset
1481
1481
1482 def pull(self, remote, heads=None, force=False):
1482 def pull(self, remote, heads=None, force=False):
1483 lock = self.lock()
1483 lock = self.lock()
1484 try:
1484 try:
1485 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1485 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1486 force=force)
1486 force=force)
1487 if fetch == [nullid]:
1487 if fetch == [nullid]:
1488 self.ui.status(_("requesting all changes\n"))
1488 self.ui.status(_("requesting all changes\n"))
1489
1489
1490 if not fetch:
1490 if not fetch:
1491 self.ui.status(_("no changes found\n"))
1491 self.ui.status(_("no changes found\n"))
1492 return 0
1492 return 0
1493
1493
1494 if heads is None and remote.capable('changegroupsubset'):
1494 if heads is None and remote.capable('changegroupsubset'):
1495 heads = rheads
1495 heads = rheads
1496
1496
1497 if heads is None:
1497 if heads is None:
1498 cg = remote.changegroup(fetch, 'pull')
1498 cg = remote.changegroup(fetch, 'pull')
1499 else:
1499 else:
1500 if not remote.capable('changegroupsubset'):
1500 if not remote.capable('changegroupsubset'):
1501 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1501 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1502 cg = remote.changegroupsubset(fetch, heads, 'pull')
1502 cg = remote.changegroupsubset(fetch, heads, 'pull')
1503 return self.addchangegroup(cg, 'pull', remote.url())
1503 return self.addchangegroup(cg, 'pull', remote.url())
1504 finally:
1504 finally:
1505 lock.release()
1505 lock.release()
1506
1506
1507 def push(self, remote, force=False, revs=None):
1507 def push(self, remote, force=False, revs=None):
1508 # there are two ways to push to remote repo:
1508 # there are two ways to push to remote repo:
1509 #
1509 #
1510 # addchangegroup assumes local user can lock remote
1510 # addchangegroup assumes local user can lock remote
1511 # repo (local filesystem, old ssh servers).
1511 # repo (local filesystem, old ssh servers).
1512 #
1512 #
1513 # unbundle assumes local user cannot lock remote repo (new ssh
1513 # unbundle assumes local user cannot lock remote repo (new ssh
1514 # servers, http servers).
1514 # servers, http servers).
1515
1515
1516 if remote.capable('unbundle'):
1516 if remote.capable('unbundle'):
1517 return self.push_unbundle(remote, force, revs)
1517 return self.push_unbundle(remote, force, revs)
1518 return self.push_addchangegroup(remote, force, revs)
1518 return self.push_addchangegroup(remote, force, revs)
1519
1519
1520 def prepush(self, remote, force, revs):
1520 def prepush(self, remote, force, revs):
1521 common = {}
1521 common = {}
1522 remote_heads = remote.heads()
1522 remote_heads = remote.heads()
1523 inc = self.findincoming(remote, common, remote_heads, force=force)
1523 inc = self.findincoming(remote, common, remote_heads, force=force)
1524
1524
1525 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1525 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1526 if revs is not None:
1526 if revs is not None:
1527 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1527 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1528 else:
1528 else:
1529 bases, heads = update, self.changelog.heads()
1529 bases, heads = update, self.changelog.heads()
1530
1530
1531 if not bases:
1531 if not bases:
1532 self.ui.status(_("no changes found\n"))
1532 self.ui.status(_("no changes found\n"))
1533 return None, 1
1533 return None, 1
1534 elif not force:
1534 elif not force:
1535 # check if we're creating new remote heads
1535 # check if we're creating new remote heads
1536 # to be a remote head after push, node must be either
1536 # to be a remote head after push, node must be either
1537 # - unknown locally
1537 # - unknown locally
1538 # - a local outgoing head descended from update
1538 # - a local outgoing head descended from update
1539 # - a remote head that's known locally and not
1539 # - a remote head that's known locally and not
1540 # ancestral to an outgoing head
1540 # ancestral to an outgoing head
1541
1541
1542 warn = 0
1542 warn = 0
1543
1543
1544 if remote_heads == [nullid]:
1544 if remote_heads == [nullid]:
1545 warn = 0
1545 warn = 0
1546 elif not revs and len(heads) > len(remote_heads):
1546 elif not revs and len(heads) > len(remote_heads):
1547 warn = 1
1547 warn = 1
1548 else:
1548 else:
1549 newheads = list(heads)
1549 newheads = list(heads)
1550 for r in remote_heads:
1550 for r in remote_heads:
1551 if r in self.changelog.nodemap:
1551 if r in self.changelog.nodemap:
1552 desc = self.changelog.heads(r, heads)
1552 desc = self.changelog.heads(r, heads)
1553 l = [h for h in heads if h in desc]
1553 l = [h for h in heads if h in desc]
1554 if not l:
1554 if not l:
1555 newheads.append(r)
1555 newheads.append(r)
1556 else:
1556 else:
1557 newheads.append(r)
1557 newheads.append(r)
1558 if len(newheads) > len(remote_heads):
1558 if len(newheads) > len(remote_heads):
1559 warn = 1
1559 warn = 1
1560
1560
1561 if warn:
1561 if warn:
1562 self.ui.warn(_("abort: push creates new remote heads!\n"))
1562 self.ui.warn(_("abort: push creates new remote heads!\n"))
1563 self.ui.status(_("(did you forget to merge?"
1563 self.ui.status(_("(did you forget to merge?"
1564 " use push -f to force)\n"))
1564 " use push -f to force)\n"))
1565 return None, 0
1565 return None, 0
1566 elif inc:
1566 elif inc:
1567 self.ui.warn(_("note: unsynced remote changes!\n"))
1567 self.ui.warn(_("note: unsynced remote changes!\n"))
1568
1568
1569
1569
1570 if revs is None:
1570 if revs is None:
1571 # use the fast path, no race possible on push
1571 # use the fast path, no race possible on push
1572 cg = self._changegroup(common.keys(), 'push')
1572 cg = self._changegroup(common.keys(), 'push')
1573 else:
1573 else:
1574 cg = self.changegroupsubset(update, revs, 'push')
1574 cg = self.changegroupsubset(update, revs, 'push')
1575 return cg, remote_heads
1575 return cg, remote_heads
1576
1576
1577 def push_addchangegroup(self, remote, force, revs):
1577 def push_addchangegroup(self, remote, force, revs):
1578 lock = remote.lock()
1578 lock = remote.lock()
1579 try:
1579 try:
1580 ret = self.prepush(remote, force, revs)
1580 ret = self.prepush(remote, force, revs)
1581 if ret[0] is not None:
1581 if ret[0] is not None:
1582 cg, remote_heads = ret
1582 cg, remote_heads = ret
1583 return remote.addchangegroup(cg, 'push', self.url())
1583 return remote.addchangegroup(cg, 'push', self.url())
1584 return ret[1]
1584 return ret[1]
1585 finally:
1585 finally:
1586 lock.release()
1586 lock.release()
1587
1587
1588 def push_unbundle(self, remote, force, revs):
1588 def push_unbundle(self, remote, force, revs):
1589 # local repo finds heads on server, finds out what revs it
1589 # local repo finds heads on server, finds out what revs it
1590 # must push. once revs transferred, if server finds it has
1590 # must push. once revs transferred, if server finds it has
1591 # different heads (someone else won commit/push race), server
1591 # different heads (someone else won commit/push race), server
1592 # aborts.
1592 # aborts.
1593
1593
1594 ret = self.prepush(remote, force, revs)
1594 ret = self.prepush(remote, force, revs)
1595 if ret[0] is not None:
1595 if ret[0] is not None:
1596 cg, remote_heads = ret
1596 cg, remote_heads = ret
1597 if force: remote_heads = ['force']
1597 if force: remote_heads = ['force']
1598 return remote.unbundle(cg, remote_heads, 'push')
1598 return remote.unbundle(cg, remote_heads, 'push')
1599 return ret[1]
1599 return ret[1]
1600
1600
1601 def changegroupinfo(self, nodes, source):
1601 def changegroupinfo(self, nodes, source):
1602 if self.ui.verbose or source == 'bundle':
1602 if self.ui.verbose or source == 'bundle':
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1604 if self.ui.debugflag:
1604 if self.ui.debugflag:
1605 self.ui.debug(_("list of changesets:\n"))
1605 self.ui.debug(_("list of changesets:\n"))
1606 for node in nodes:
1606 for node in nodes:
1607 self.ui.debug("%s\n" % hex(node))
1607 self.ui.debug("%s\n" % hex(node))
1608
1608
1609 def changegroupsubset(self, bases, heads, source, extranodes=None):
1609 def changegroupsubset(self, bases, heads, source, extranodes=None):
1610 """This function generates a changegroup consisting of all the nodes
1610 """This function generates a changegroup consisting of all the nodes
1611 that are descendents of any of the bases, and ancestors of any of
1611 that are descendents of any of the bases, and ancestors of any of
1612 the heads.
1612 the heads.
1613
1613
1614 It is fairly complex as determining which filenodes and which
1614 It is fairly complex as determining which filenodes and which
1615 manifest nodes need to be included for the changeset to be complete
1615 manifest nodes need to be included for the changeset to be complete
1616 is non-trivial.
1616 is non-trivial.
1617
1617
1618 Another wrinkle is doing the reverse, figuring out which changeset in
1618 Another wrinkle is doing the reverse, figuring out which changeset in
1619 the changegroup a particular filenode or manifestnode belongs to.
1619 the changegroup a particular filenode or manifestnode belongs to.
1620
1620
1621 The caller can specify some nodes that must be included in the
1621 The caller can specify some nodes that must be included in the
1622 changegroup using the extranodes argument. It should be a dict
1622 changegroup using the extranodes argument. It should be a dict
1623 where the keys are the filenames (or 1 for the manifest), and the
1623 where the keys are the filenames (or 1 for the manifest), and the
1624 values are lists of (node, linknode) tuples, where node is a wanted
1624 values are lists of (node, linknode) tuples, where node is a wanted
1625 node and linknode is the changelog node that should be transmitted as
1625 node and linknode is the changelog node that should be transmitted as
1626 the linkrev.
1626 the linkrev.
1627 """
1627 """
1628
1628
1629 if extranodes is None:
1629 if extranodes is None:
1630 # can we go through the fast path ?
1630 # can we go through the fast path ?
1631 heads.sort()
1631 heads.sort()
1632 allheads = self.heads()
1632 allheads = self.heads()
1633 allheads.sort()
1633 allheads.sort()
1634 if heads == allheads:
1634 if heads == allheads:
1635 common = []
1635 common = []
1636 # parents of bases are known from both sides
1636 # parents of bases are known from both sides
1637 for n in bases:
1637 for n in bases:
1638 for p in self.changelog.parents(n):
1638 for p in self.changelog.parents(n):
1639 if p != nullid:
1639 if p != nullid:
1640 common.append(p)
1640 common.append(p)
1641 return self._changegroup(common, source)
1641 return self._changegroup(common, source)
1642
1642
1643 self.hook('preoutgoing', throw=True, source=source)
1643 self.hook('preoutgoing', throw=True, source=source)
1644
1644
1645 # Set up some initial variables
1645 # Set up some initial variables
1646 # Make it easy to refer to self.changelog
1646 # Make it easy to refer to self.changelog
1647 cl = self.changelog
1647 cl = self.changelog
1648 # msng is short for missing - compute the list of changesets in this
1648 # msng is short for missing - compute the list of changesets in this
1649 # changegroup.
1649 # changegroup.
1650 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1650 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1651 self.changegroupinfo(msng_cl_lst, source)
1651 self.changegroupinfo(msng_cl_lst, source)
1652 # Some bases may turn out to be superfluous, and some heads may be
1652 # Some bases may turn out to be superfluous, and some heads may be
1653 # too. nodesbetween will return the minimal set of bases and heads
1653 # too. nodesbetween will return the minimal set of bases and heads
1654 # necessary to re-create the changegroup.
1654 # necessary to re-create the changegroup.
1655
1655
1656 # Known heads are the list of heads that it is assumed the recipient
1656 # Known heads are the list of heads that it is assumed the recipient
1657 # of this changegroup will know about.
1657 # of this changegroup will know about.
1658 knownheads = {}
1658 knownheads = {}
1659 # We assume that all parents of bases are known heads.
1659 # We assume that all parents of bases are known heads.
1660 for n in bases:
1660 for n in bases:
1661 for p in cl.parents(n):
1661 for p in cl.parents(n):
1662 if p != nullid:
1662 if p != nullid:
1663 knownheads[p] = 1
1663 knownheads[p] = 1
1664 knownheads = knownheads.keys()
1664 knownheads = knownheads.keys()
1665 if knownheads:
1665 if knownheads:
1666 # Now that we know what heads are known, we can compute which
1666 # Now that we know what heads are known, we can compute which
1667 # changesets are known. The recipient must know about all
1667 # changesets are known. The recipient must know about all
1668 # changesets required to reach the known heads from the null
1668 # changesets required to reach the known heads from the null
1669 # changeset.
1669 # changeset.
1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1671 junk = None
1671 junk = None
1672 # Transform the list into an ersatz set.
1672 # Transform the list into an ersatz set.
1673 has_cl_set = dict.fromkeys(has_cl_set)
1673 has_cl_set = dict.fromkeys(has_cl_set)
1674 else:
1674 else:
1675 # If there were no known heads, the recipient cannot be assumed to
1675 # If there were no known heads, the recipient cannot be assumed to
1676 # know about any changesets.
1676 # know about any changesets.
1677 has_cl_set = {}
1677 has_cl_set = {}
1678
1678
1679 # Make it easy to refer to self.manifest
1679 # Make it easy to refer to self.manifest
1680 mnfst = self.manifest
1680 mnfst = self.manifest
1681 # We don't know which manifests are missing yet
1681 # We don't know which manifests are missing yet
1682 msng_mnfst_set = {}
1682 msng_mnfst_set = {}
1683 # Nor do we know which filenodes are missing.
1683 # Nor do we know which filenodes are missing.
1684 msng_filenode_set = {}
1684 msng_filenode_set = {}
1685
1685
1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1687 junk = None
1687 junk = None
1688
1688
1689 # A changeset always belongs to itself, so the changenode lookup
1689 # A changeset always belongs to itself, so the changenode lookup
1690 # function for a changenode is identity.
1690 # function for a changenode is identity.
1691 def identity(x):
1691 def identity(x):
1692 return x
1692 return x
1693
1693
1694 # A function generating function. Sets up an environment for the
1694 # A function generating function. Sets up an environment for the
1695 # inner function.
1695 # inner function.
1696 def cmp_by_rev_func(revlog):
1696 def cmp_by_rev_func(revlog):
1697 # Compare two nodes by their revision number in the environment's
1697 # Compare two nodes by their revision number in the environment's
1698 # revision history. Since the revision number both represents the
1698 # revision history. Since the revision number both represents the
1699 # most efficient order to read the nodes in, and represents a
1699 # most efficient order to read the nodes in, and represents a
1700 # topological sorting of the nodes, this function is often useful.
1700 # topological sorting of the nodes, this function is often useful.
1701 def cmp_by_rev(a, b):
1701 def cmp_by_rev(a, b):
1702 return cmp(revlog.rev(a), revlog.rev(b))
1702 return cmp(revlog.rev(a), revlog.rev(b))
1703 return cmp_by_rev
1703 return cmp_by_rev
1704
1704
1705 # If we determine that a particular file or manifest node must be a
1705 # If we determine that a particular file or manifest node must be a
1706 # node that the recipient of the changegroup will already have, we can
1706 # node that the recipient of the changegroup will already have, we can
1707 # also assume the recipient will have all the parents. This function
1707 # also assume the recipient will have all the parents. This function
1708 # prunes them from the set of missing nodes.
1708 # prunes them from the set of missing nodes.
1709 def prune_parents(revlog, hasset, msngset):
1709 def prune_parents(revlog, hasset, msngset):
1710 haslst = hasset.keys()
1710 haslst = hasset.keys()
1711 haslst.sort(cmp_by_rev_func(revlog))
1711 haslst.sort(cmp_by_rev_func(revlog))
1712 for node in haslst:
1712 for node in haslst:
1713 parentlst = [p for p in revlog.parents(node) if p != nullid]
1713 parentlst = [p for p in revlog.parents(node) if p != nullid]
1714 while parentlst:
1714 while parentlst:
1715 n = parentlst.pop()
1715 n = parentlst.pop()
1716 if n not in hasset:
1716 if n not in hasset:
1717 hasset[n] = 1
1717 hasset[n] = 1
1718 p = [p for p in revlog.parents(n) if p != nullid]
1718 p = [p for p in revlog.parents(n) if p != nullid]
1719 parentlst.extend(p)
1719 parentlst.extend(p)
1720 for n in hasset:
1720 for n in hasset:
1721 msngset.pop(n, None)
1721 msngset.pop(n, None)
1722
1722
1723 # This is a function generating function used to set up an environment
1723 # This is a function generating function used to set up an environment
1724 # for the inner function to execute in.
1724 # for the inner function to execute in.
1725 def manifest_and_file_collector(changedfileset):
1725 def manifest_and_file_collector(changedfileset):
1726 # This is an information gathering function that gathers
1726 # This is an information gathering function that gathers
1727 # information from each changeset node that goes out as part of
1727 # information from each changeset node that goes out as part of
1728 # the changegroup. The information gathered is a list of which
1728 # the changegroup. The information gathered is a list of which
1729 # manifest nodes are potentially required (the recipient may
1729 # manifest nodes are potentially required (the recipient may
1730 # already have them) and total list of all files which were
1730 # already have them) and total list of all files which were
1731 # changed in any changeset in the changegroup.
1731 # changed in any changeset in the changegroup.
1732 #
1732 #
1733 # We also remember the first changenode we saw any manifest
1733 # We also remember the first changenode we saw any manifest
1734 # referenced by so we can later determine which changenode 'owns'
1734 # referenced by so we can later determine which changenode 'owns'
1735 # the manifest.
1735 # the manifest.
1736 def collect_manifests_and_files(clnode):
1736 def collect_manifests_and_files(clnode):
1737 c = cl.read(clnode)
1737 c = cl.read(clnode)
1738 for f in c[3]:
1738 for f in c[3]:
1739 # This is to make sure we only have one instance of each
1739 # This is to make sure we only have one instance of each
1740 # filename string for each filename.
1740 # filename string for each filename.
1741 changedfileset.setdefault(f, f)
1741 changedfileset.setdefault(f, f)
1742 msng_mnfst_set.setdefault(c[0], clnode)
1742 msng_mnfst_set.setdefault(c[0], clnode)
1743 return collect_manifests_and_files
1743 return collect_manifests_and_files
1744
1744
1745 # Figure out which manifest nodes (of the ones we think might be part
1745 # Figure out which manifest nodes (of the ones we think might be part
1746 # of the changegroup) the recipient must know about and remove them
1746 # of the changegroup) the recipient must know about and remove them
1747 # from the changegroup.
1747 # from the changegroup.
1748 def prune_manifests():
1748 def prune_manifests():
1749 has_mnfst_set = {}
1749 has_mnfst_set = {}
1750 for n in msng_mnfst_set:
1750 for n in msng_mnfst_set:
1751 # If a 'missing' manifest thinks it belongs to a changenode
1751 # If a 'missing' manifest thinks it belongs to a changenode
1752 # the recipient is assumed to have, obviously the recipient
1752 # the recipient is assumed to have, obviously the recipient
1753 # must have that manifest.
1753 # must have that manifest.
1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1755 if linknode in has_cl_set:
1755 if linknode in has_cl_set:
1756 has_mnfst_set[n] = 1
1756 has_mnfst_set[n] = 1
1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1758
1758
1759 # Use the information collected in collect_manifests_and_files to say
1759 # Use the information collected in collect_manifests_and_files to say
1760 # which changenode any manifestnode belongs to.
1760 # which changenode any manifestnode belongs to.
1761 def lookup_manifest_link(mnfstnode):
1761 def lookup_manifest_link(mnfstnode):
1762 return msng_mnfst_set[mnfstnode]
1762 return msng_mnfst_set[mnfstnode]
1763
1763
1764 # A function generating function that sets up the initial environment
1764 # A function generating function that sets up the initial environment
1765 # the inner function.
1765 # the inner function.
1766 def filenode_collector(changedfiles):
1766 def filenode_collector(changedfiles):
1767 next_rev = [0]
1767 next_rev = [0]
1768 # This gathers information from each manifestnode included in the
1768 # This gathers information from each manifestnode included in the
1769 # changegroup about which filenodes the manifest node references
1769 # changegroup about which filenodes the manifest node references
1770 # so we can include those in the changegroup too.
1770 # so we can include those in the changegroup too.
1771 #
1771 #
1772 # It also remembers which changenode each filenode belongs to. It
1772 # It also remembers which changenode each filenode belongs to. It
1773 # does this by assuming the a filenode belongs to the changenode
1773 # does this by assuming the a filenode belongs to the changenode
1774 # the first manifest that references it belongs to.
1774 # the first manifest that references it belongs to.
1775 def collect_msng_filenodes(mnfstnode):
1775 def collect_msng_filenodes(mnfstnode):
1776 r = mnfst.rev(mnfstnode)
1776 r = mnfst.rev(mnfstnode)
1777 if r == next_rev[0]:
1777 if r == next_rev[0]:
1778 # If the last rev we looked at was the one just previous,
1778 # If the last rev we looked at was the one just previous,
1779 # we only need to see a diff.
1779 # we only need to see a diff.
1780 deltamf = mnfst.readdelta(mnfstnode)
1780 deltamf = mnfst.readdelta(mnfstnode)
1781 # For each line in the delta
1781 # For each line in the delta
1782 for f, fnode in deltamf.iteritems():
1782 for f, fnode in deltamf.iteritems():
1783 f = changedfiles.get(f, None)
1783 f = changedfiles.get(f, None)
1784 # And if the file is in the list of files we care
1784 # And if the file is in the list of files we care
1785 # about.
1785 # about.
1786 if f is not None:
1786 if f is not None:
1787 # Get the changenode this manifest belongs to
1787 # Get the changenode this manifest belongs to
1788 clnode = msng_mnfst_set[mnfstnode]
1788 clnode = msng_mnfst_set[mnfstnode]
1789 # Create the set of filenodes for the file if
1789 # Create the set of filenodes for the file if
1790 # there isn't one already.
1790 # there isn't one already.
1791 ndset = msng_filenode_set.setdefault(f, {})
1791 ndset = msng_filenode_set.setdefault(f, {})
1792 # And set the filenode's changelog node to the
1792 # And set the filenode's changelog node to the
1793 # manifest's if it hasn't been set already.
1793 # manifest's if it hasn't been set already.
1794 ndset.setdefault(fnode, clnode)
1794 ndset.setdefault(fnode, clnode)
1795 else:
1795 else:
1796 # Otherwise we need a full manifest.
1796 # Otherwise we need a full manifest.
1797 m = mnfst.read(mnfstnode)
1797 m = mnfst.read(mnfstnode)
1798 # For every file in we care about.
1798 # For every file in we care about.
1799 for f in changedfiles:
1799 for f in changedfiles:
1800 fnode = m.get(f, None)
1800 fnode = m.get(f, None)
1801 # If it's in the manifest
1801 # If it's in the manifest
1802 if fnode is not None:
1802 if fnode is not None:
1803 # See comments above.
1803 # See comments above.
1804 clnode = msng_mnfst_set[mnfstnode]
1804 clnode = msng_mnfst_set[mnfstnode]
1805 ndset = msng_filenode_set.setdefault(f, {})
1805 ndset = msng_filenode_set.setdefault(f, {})
1806 ndset.setdefault(fnode, clnode)
1806 ndset.setdefault(fnode, clnode)
1807 # Remember the revision we hope to see next.
1807 # Remember the revision we hope to see next.
1808 next_rev[0] = r + 1
1808 next_rev[0] = r + 1
1809 return collect_msng_filenodes
1809 return collect_msng_filenodes
1810
1810
1811 # We have a list of filenodes we think we need for a file, lets remove
1811 # We have a list of filenodes we think we need for a file, lets remove
1812 # all those we now the recipient must have.
1812 # all those we now the recipient must have.
1813 def prune_filenodes(f, filerevlog):
1813 def prune_filenodes(f, filerevlog):
1814 msngset = msng_filenode_set[f]
1814 msngset = msng_filenode_set[f]
1815 hasset = {}
1815 hasset = {}
1816 # If a 'missing' filenode thinks it belongs to a changenode we
1816 # If a 'missing' filenode thinks it belongs to a changenode we
1817 # assume the recipient must have, then the recipient must have
1817 # assume the recipient must have, then the recipient must have
1818 # that filenode.
1818 # that filenode.
1819 for n in msngset:
1819 for n in msngset:
1820 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1820 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1821 if clnode in has_cl_set:
1821 if clnode in has_cl_set:
1822 hasset[n] = 1
1822 hasset[n] = 1
1823 prune_parents(filerevlog, hasset, msngset)
1823 prune_parents(filerevlog, hasset, msngset)
1824
1824
1825 # A function generator function that sets up the a context for the
1825 # A function generator function that sets up the a context for the
1826 # inner function.
1826 # inner function.
1827 def lookup_filenode_link_func(fname):
1827 def lookup_filenode_link_func(fname):
1828 msngset = msng_filenode_set[fname]
1828 msngset = msng_filenode_set[fname]
1829 # Lookup the changenode the filenode belongs to.
1829 # Lookup the changenode the filenode belongs to.
1830 def lookup_filenode_link(fnode):
1830 def lookup_filenode_link(fnode):
1831 return msngset[fnode]
1831 return msngset[fnode]
1832 return lookup_filenode_link
1832 return lookup_filenode_link
1833
1833
1834 # Add the nodes that were explicitly requested.
1834 # Add the nodes that were explicitly requested.
1835 def add_extra_nodes(name, nodes):
1835 def add_extra_nodes(name, nodes):
1836 if not extranodes or name not in extranodes:
1836 if not extranodes or name not in extranodes:
1837 return
1837 return
1838
1838
1839 for node, linknode in extranodes[name]:
1839 for node, linknode in extranodes[name]:
1840 if node not in nodes:
1840 if node not in nodes:
1841 nodes[node] = linknode
1841 nodes[node] = linknode
1842
1842
1843 # Now that we have all theses utility functions to help out and
1843 # Now that we have all theses utility functions to help out and
1844 # logically divide up the task, generate the group.
1844 # logically divide up the task, generate the group.
1845 def gengroup():
1845 def gengroup():
1846 # The set of changed files starts empty.
1846 # The set of changed files starts empty.
1847 changedfiles = {}
1847 changedfiles = {}
1848 # Create a changenode group generator that will call our functions
1848 # Create a changenode group generator that will call our functions
1849 # back to lookup the owning changenode and collect information.
1849 # back to lookup the owning changenode and collect information.
1850 group = cl.group(msng_cl_lst, identity,
1850 group = cl.group(msng_cl_lst, identity,
1851 manifest_and_file_collector(changedfiles))
1851 manifest_and_file_collector(changedfiles))
1852 for chnk in group:
1852 for chnk in group:
1853 yield chnk
1853 yield chnk
1854
1854
1855 # The list of manifests has been collected by the generator
1855 # The list of manifests has been collected by the generator
1856 # calling our functions back.
1856 # calling our functions back.
1857 prune_manifests()
1857 prune_manifests()
1858 add_extra_nodes(1, msng_mnfst_set)
1858 add_extra_nodes(1, msng_mnfst_set)
1859 msng_mnfst_lst = msng_mnfst_set.keys()
1859 msng_mnfst_lst = msng_mnfst_set.keys()
1860 # Sort the manifestnodes by revision number.
1860 # Sort the manifestnodes by revision number.
1861 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1861 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1862 # Create a generator for the manifestnodes that calls our lookup
1862 # Create a generator for the manifestnodes that calls our lookup
1863 # and data collection functions back.
1863 # and data collection functions back.
1864 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1864 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1865 filenode_collector(changedfiles))
1865 filenode_collector(changedfiles))
1866 for chnk in group:
1866 for chnk in group:
1867 yield chnk
1867 yield chnk
1868
1868
1869 # These are no longer needed, dereference and toss the memory for
1869 # These are no longer needed, dereference and toss the memory for
1870 # them.
1870 # them.
1871 msng_mnfst_lst = None
1871 msng_mnfst_lst = None
1872 msng_mnfst_set.clear()
1872 msng_mnfst_set.clear()
1873
1873
1874 if extranodes:
1874 if extranodes:
1875 for fname in extranodes:
1875 for fname in extranodes:
1876 if isinstance(fname, int):
1876 if isinstance(fname, int):
1877 continue
1877 continue
1878 msng_filenode_set.setdefault(fname, {})
1878 msng_filenode_set.setdefault(fname, {})
1879 changedfiles[fname] = 1
1879 changedfiles[fname] = 1
1880 # Go through all our files in order sorted by name.
1880 # Go through all our files in order sorted by name.
1881 for fname in util.sort(changedfiles):
1881 for fname in util.sort(changedfiles):
1882 filerevlog = self.file(fname)
1882 filerevlog = self.file(fname)
1883 if not len(filerevlog):
1883 if not len(filerevlog):
1884 raise util.Abort(_("empty or missing revlog for %s") % fname)
1884 raise util.Abort(_("empty or missing revlog for %s") % fname)
1885 # Toss out the filenodes that the recipient isn't really
1885 # Toss out the filenodes that the recipient isn't really
1886 # missing.
1886 # missing.
1887 if fname in msng_filenode_set:
1887 if fname in msng_filenode_set:
1888 prune_filenodes(fname, filerevlog)
1888 prune_filenodes(fname, filerevlog)
1889 add_extra_nodes(fname, msng_filenode_set[fname])
1889 add_extra_nodes(fname, msng_filenode_set[fname])
1890 msng_filenode_lst = msng_filenode_set[fname].keys()
1890 msng_filenode_lst = msng_filenode_set[fname].keys()
1891 else:
1891 else:
1892 msng_filenode_lst = []
1892 msng_filenode_lst = []
1893 # If any filenodes are left, generate the group for them,
1893 # If any filenodes are left, generate the group for them,
1894 # otherwise don't bother.
1894 # otherwise don't bother.
1895 if len(msng_filenode_lst) > 0:
1895 if len(msng_filenode_lst) > 0:
1896 yield changegroup.chunkheader(len(fname))
1896 yield changegroup.chunkheader(len(fname))
1897 yield fname
1897 yield fname
1898 # Sort the filenodes by their revision #
1898 # Sort the filenodes by their revision #
1899 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1899 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1900 # Create a group generator and only pass in a changenode
1900 # Create a group generator and only pass in a changenode
1901 # lookup function as we need to collect no information
1901 # lookup function as we need to collect no information
1902 # from filenodes.
1902 # from filenodes.
1903 group = filerevlog.group(msng_filenode_lst,
1903 group = filerevlog.group(msng_filenode_lst,
1904 lookup_filenode_link_func(fname))
1904 lookup_filenode_link_func(fname))
1905 for chnk in group:
1905 for chnk in group:
1906 yield chnk
1906 yield chnk
1907 if fname in msng_filenode_set:
1907 if fname in msng_filenode_set:
1908 # Don't need this anymore, toss it to free memory.
1908 # Don't need this anymore, toss it to free memory.
1909 del msng_filenode_set[fname]
1909 del msng_filenode_set[fname]
1910 # Signal that no more groups are left.
1910 # Signal that no more groups are left.
1911 yield changegroup.closechunk()
1911 yield changegroup.closechunk()
1912
1912
1913 if msng_cl_lst:
1913 if msng_cl_lst:
1914 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1914 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1915
1915
1916 return util.chunkbuffer(gengroup())
1916 return util.chunkbuffer(gengroup())
1917
1917
def changegroup(self, basenodes, source):
    """Return a changegroup reaching all repository heads from basenodes.

    Delegates to changegroupsubset() rather than building the group
    directly, to avoid a race (issue1320).
    """
    heads = self.heads()
    return self.changegroupsubset(basenodes, heads, source)
1921
1921
def _changegroup(self, common, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    common is the set of common nodes between remote and self"""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # every changelog node the remote is missing, and the set of their revs
    nodes = cl.findmissing(common)
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    # lookup function for changelog nodes: a changelog entry is its own
    # link node, so the identity function suffices here.
    def identity(x):
        return x

    # Yield the nodes of `log` whose linkrev falls inside the outgoing
    # revset, i.e. the revlog entries introduced by outgoing changesets.
    def gennodelst(log):
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    # Factory returning a collector that records, into changedfileset,
    # every file touched by each changeset streamed out (c[3] is the
    # changeset's file list).
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    # Factory returning a lookup that maps a node of `revlog` to the
    # changelog node that introduced it (via its linkrev).
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuprevlink

    # Stream the group: changelog chunks first (collecting changed files
    # as a side effect), then manifest chunks, then one group per file,
    # then the closing chunk.
    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in util.sort(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            # only emit a header/group for files with outgoing nodes
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1990
1990
def addchangegroup(self, source, srctype, url, emptyok=False):
    """add changegroup to repo.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # linkmapper for incoming changesets: len(cl) is the rev number the
    # next added changeset will occupy; also logs each node as it arrives.
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return len(cl)

    # linkmapper for manifests/filelogs: node -> changelog rev
    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        # proxy so the transaction object itself isn't captured by the
        # revlogs (allows the real tr to be deleted in the finally block)
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = len(cl) - 1   # rev of tip before the pull
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
            raise util.Abort(_("received changelog group is empty"))
        cnr = len(cl) - 1   # rev of tip after the pull
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = len(fl)
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # `pending` lets pretxnchangegroup hooks see the not-yet-final
            # changelog via the pending file; returns repo root if written
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        del tr

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug(_("updating the branch cache\n"))
        self.branchtags()
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        # fire one 'incoming' hook per added changeset
        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
2094
2094
2095
2095
def stream_in(self, remote):
    """Perform a streaming clone: copy raw store files from remote.

    Protocol (text lines from remote.stream_out()): first a status code
    (0 ok, 1 forbidden, 2 lock failed), then "<files> <bytes>", then per
    file a "<name>\\0<size>" header followed by exactly <size> bytes.
    Returns len(self.heads()) + 1 (a pull-style "something changed" code).
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
        # write the raw file straight into the store
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # avoid division by zero in the rate computation below
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state so the freshly written store files are seen
    self.invalidate()
    return len(self.heads()) + 1
2142
2142
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # Any client able to request uncompressed clones can read every repo
    # format served by servers that offer them. If the revlog format ever
    # changes, clients will have to check version and format flags on the
    # "stream" capability and use uncompressed transfer only when
    # compatible.
    wantstream = stream and not heads and remote.capable('stream')
    if wantstream:
        return self.stream_in(remote)
    return self.pull(remote, heads)
2161
2161
2162 # used to avoid circular references so destructors work
2162 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure that renames each queued (src, dest) pair.

    The pairs are copied eagerly so later mutation of *files* cannot
    affect the callback; using a plain function (not a bound method)
    avoids circular references so destructors work.
    """
    queued = [tuple(t) for t in files]
    def a():
        for pair in queued:
            util.rename(pair[0], pair[1])
    return a
2169
2169
def instance(ui, path, create):
    """Open (or create) a localrepository at path, stripping any
    leading 'file:' scheme from the path first."""
    return localrepository(ui, util.drop_scheme('file', path), create)
2172
2172
def islocal(path):
    # repositories handled by this module are always local
    return True
@@ -1,1495 +1,1491 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import _
15 from i18n import _
16 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
16 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
17 import os, stat, threading, time, calendar, ConfigParser, glob, osutil
17 import os, stat, threading, time, calendar, ConfigParser, glob, osutil
18 import imp
18 import imp
19
19
20 # Python compatibility
20 # Python compatibility
21
21
22 _md5 = None
22 _md5 = None
23 def md5(s):
23 def md5(s):
24 global _md5
24 global _md5
25 if _md5 is None:
25 if _md5 is None:
26 try:
26 try:
27 import hashlib
27 import hashlib
28 _md5 = hashlib.md5
28 _md5 = hashlib.md5
29 except ImportError:
29 except ImportError:
30 import md5
30 import md5
31 _md5 = md5.md5
31 _md5 = md5.md5
32 return _md5(s)
32 return _md5(s)
33
33
34 _sha1 = None
34 _sha1 = None
35 def sha1(s):
35 def sha1(s):
36 global _sha1
36 global _sha1
37 if _sha1 is None:
37 if _sha1 is None:
38 try:
38 try:
39 import hashlib
39 import hashlib
40 _sha1 = hashlib.sha1
40 _sha1 = hashlib.sha1
41 except ImportError:
41 except ImportError:
42 import sha
42 import sha
43 _sha1 = sha.sha
43 _sha1 = sha.sha
44 return _sha1(s)
44 return _sha1(s)
45
45
46 try:
46 try:
47 import subprocess
47 import subprocess
48 subprocess.Popen # trigger ImportError early
48 subprocess.Popen # trigger ImportError early
49 closefds = os.name == 'posix'
49 closefds = os.name == 'posix'
50 def popen2(cmd, mode='t', bufsize=-1):
50 def popen2(cmd, mode='t', bufsize=-1):
51 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
51 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
52 close_fds=closefds,
52 close_fds=closefds,
53 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
53 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
54 return p.stdin, p.stdout
54 return p.stdin, p.stdout
55 def popen3(cmd, mode='t', bufsize=-1):
55 def popen3(cmd, mode='t', bufsize=-1):
56 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
56 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
57 close_fds=closefds,
57 close_fds=closefds,
58 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
58 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
59 stderr=subprocess.PIPE)
59 stderr=subprocess.PIPE)
60 return p.stdin, p.stdout, p.stderr
60 return p.stdin, p.stdout, p.stderr
61 def Popen3(cmd, capturestderr=False, bufsize=-1):
61 def Popen3(cmd, capturestderr=False, bufsize=-1):
62 stderr = capturestderr and subprocess.PIPE or None
62 stderr = capturestderr and subprocess.PIPE or None
63 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
63 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
64 close_fds=closefds,
64 close_fds=closefds,
65 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
65 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
66 stderr=stderr)
66 stderr=stderr)
67 p.fromchild = p.stdout
67 p.fromchild = p.stdout
68 p.tochild = p.stdin
68 p.tochild = p.stdin
69 p.childerr = p.stderr
69 p.childerr = p.stderr
70 return p
70 return p
71 except ImportError:
71 except ImportError:
72 subprocess = None
72 subprocess = None
73 from popen2 import Popen3
73 from popen2 import Popen3
74 popen2 = os.popen2
74 popen2 = os.popen2
75 popen3 = os.popen3
75 popen3 = os.popen3
76
76
77
77
78 def version():
78 def version():
79 """Return version information if available."""
79 """Return version information if available."""
80 try:
80 try:
81 import __version__
81 import __version__
82 return __version__.version
82 return __version__.version
83 except ImportError:
83 except ImportError:
84 return 'unknown'
84 return 'unknown'
85
85
86 # used by parsedate
86 # used by parsedate
87 defaultdateformats = (
87 defaultdateformats = (
88 '%Y-%m-%d %H:%M:%S',
88 '%Y-%m-%d %H:%M:%S',
89 '%Y-%m-%d %I:%M:%S%p',
89 '%Y-%m-%d %I:%M:%S%p',
90 '%Y-%m-%d %H:%M',
90 '%Y-%m-%d %H:%M',
91 '%Y-%m-%d %I:%M%p',
91 '%Y-%m-%d %I:%M%p',
92 '%Y-%m-%d',
92 '%Y-%m-%d',
93 '%m-%d',
93 '%m-%d',
94 '%m/%d',
94 '%m/%d',
95 '%m/%d/%y',
95 '%m/%d/%y',
96 '%m/%d/%Y',
96 '%m/%d/%Y',
97 '%a %b %d %H:%M:%S %Y',
97 '%a %b %d %H:%M:%S %Y',
98 '%a %b %d %I:%M:%S%p %Y',
98 '%a %b %d %I:%M:%S%p %Y',
99 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
99 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
100 '%b %d %H:%M:%S %Y',
100 '%b %d %H:%M:%S %Y',
101 '%b %d %I:%M:%S%p %Y',
101 '%b %d %I:%M:%S%p %Y',
102 '%b %d %H:%M:%S',
102 '%b %d %H:%M:%S',
103 '%b %d %I:%M:%S%p',
103 '%b %d %I:%M:%S%p',
104 '%b %d %H:%M',
104 '%b %d %H:%M',
105 '%b %d %I:%M%p',
105 '%b %d %I:%M%p',
106 '%b %d %Y',
106 '%b %d %Y',
107 '%b %d',
107 '%b %d',
108 '%H:%M:%S',
108 '%H:%M:%S',
109 '%I:%M:%SP',
109 '%I:%M:%SP',
110 '%H:%M',
110 '%H:%M',
111 '%I:%M%p',
111 '%I:%M%p',
112 )
112 )
113
113
114 extendeddateformats = defaultdateformats + (
114 extendeddateformats = defaultdateformats + (
115 "%Y",
115 "%Y",
116 "%Y-%m",
116 "%Y-%m",
117 "%b",
117 "%b",
118 "%b %Y",
118 "%b %Y",
119 )
119 )
120
120
121 # differences from SafeConfigParser:
121 # differences from SafeConfigParser:
122 # - case-sensitive keys
122 # - case-sensitive keys
123 # - allows values that are not strings (this means that you may not
123 # - allows values that are not strings (this means that you may not
124 # be able to save the configuration to a file)
124 # be able to save the configuration to a file)
125 class configparser(ConfigParser.SafeConfigParser):
125 class configparser(ConfigParser.SafeConfigParser):
126 def optionxform(self, optionstr):
126 def optionxform(self, optionstr):
127 return optionstr
127 return optionstr
128
128
129 def set(self, section, option, value):
129 def set(self, section, option, value):
130 return ConfigParser.ConfigParser.set(self, section, option, value)
130 return ConfigParser.ConfigParser.set(self, section, option, value)
131
131
132 def _interpolate(self, section, option, rawval, vars):
132 def _interpolate(self, section, option, rawval, vars):
133 if not isinstance(rawval, basestring):
133 if not isinstance(rawval, basestring):
134 return rawval
134 return rawval
135 return ConfigParser.SafeConfigParser._interpolate(self, section,
135 return ConfigParser.SafeConfigParser._interpolate(self, section,
136 option, rawval, vars)
136 option, rawval, vars)
137
137
138 def cachefunc(func):
138 def cachefunc(func):
139 '''cache the result of function calls'''
139 '''cache the result of function calls'''
140 # XXX doesn't handle keywords args
140 # XXX doesn't handle keywords args
141 cache = {}
141 cache = {}
142 if func.func_code.co_argcount == 1:
142 if func.func_code.co_argcount == 1:
143 # we gain a small amount of time because
143 # we gain a small amount of time because
144 # we don't need to pack/unpack the list
144 # we don't need to pack/unpack the list
145 def f(arg):
145 def f(arg):
146 if arg not in cache:
146 if arg not in cache:
147 cache[arg] = func(arg)
147 cache[arg] = func(arg)
148 return cache[arg]
148 return cache[arg]
149 else:
149 else:
150 def f(*args):
150 def f(*args):
151 if args not in cache:
151 if args not in cache:
152 cache[args] = func(*args)
152 cache[args] = func(*args)
153 return cache[args]
153 return cache[args]
154
154
155 return f
155 return f
156
156
157 def pipefilter(s, cmd):
157 def pipefilter(s, cmd):
158 '''filter string S through command CMD, returning its output'''
158 '''filter string S through command CMD, returning its output'''
159 (pin, pout) = popen2(cmd, 'b')
159 (pin, pout) = popen2(cmd, 'b')
160 def writer():
160 def writer():
161 try:
161 try:
162 pin.write(s)
162 pin.write(s)
163 pin.close()
163 pin.close()
164 except IOError, inst:
164 except IOError, inst:
165 if inst.errno != errno.EPIPE:
165 if inst.errno != errno.EPIPE:
166 raise
166 raise
167
167
168 # we should use select instead on UNIX, but this will work on most
168 # we should use select instead on UNIX, but this will work on most
169 # systems, including Windows
169 # systems, including Windows
170 w = threading.Thread(target=writer)
170 w = threading.Thread(target=writer)
171 w.start()
171 w.start()
172 f = pout.read()
172 f = pout.read()
173 pout.close()
173 pout.close()
174 w.join()
174 w.join()
175 return f
175 return f
176
176
177 def tempfilter(s, cmd):
177 def tempfilter(s, cmd):
178 '''filter string S through a pair of temporary files with CMD.
178 '''filter string S through a pair of temporary files with CMD.
179 CMD is used as a template to create the real command to be run,
179 CMD is used as a template to create the real command to be run,
180 with the strings INFILE and OUTFILE replaced by the real names of
180 with the strings INFILE and OUTFILE replaced by the real names of
181 the temporary files generated.'''
181 the temporary files generated.'''
182 inname, outname = None, None
182 inname, outname = None, None
183 try:
183 try:
184 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
184 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
185 fp = os.fdopen(infd, 'wb')
185 fp = os.fdopen(infd, 'wb')
186 fp.write(s)
186 fp.write(s)
187 fp.close()
187 fp.close()
188 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
188 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
189 os.close(outfd)
189 os.close(outfd)
190 cmd = cmd.replace('INFILE', inname)
190 cmd = cmd.replace('INFILE', inname)
191 cmd = cmd.replace('OUTFILE', outname)
191 cmd = cmd.replace('OUTFILE', outname)
192 code = os.system(cmd)
192 code = os.system(cmd)
193 if sys.platform == 'OpenVMS' and code & 1:
193 if sys.platform == 'OpenVMS' and code & 1:
194 code = 0
194 code = 0
195 if code: raise Abort(_("command '%s' failed: %s") %
195 if code: raise Abort(_("command '%s' failed: %s") %
196 (cmd, explain_exit(code)))
196 (cmd, explain_exit(code)))
197 return open(outname, 'rb').read()
197 return open(outname, 'rb').read()
198 finally:
198 finally:
199 try:
199 try:
200 if inname: os.unlink(inname)
200 if inname: os.unlink(inname)
201 except: pass
201 except: pass
202 try:
202 try:
203 if outname: os.unlink(outname)
203 if outname: os.unlink(outname)
204 except: pass
204 except: pass
205
205
206 filtertable = {
206 filtertable = {
207 'tempfile:': tempfilter,
207 'tempfile:': tempfilter,
208 'pipe:': pipefilter,
208 'pipe:': pipefilter,
209 }
209 }
210
210
211 def filter(s, cmd):
211 def filter(s, cmd):
212 "filter a string through a command that transforms its input to its output"
212 "filter a string through a command that transforms its input to its output"
213 for name, fn in filtertable.iteritems():
213 for name, fn in filtertable.iteritems():
214 if cmd.startswith(name):
214 if cmd.startswith(name):
215 return fn(s, cmd[len(name):].lstrip())
215 return fn(s, cmd[len(name):].lstrip())
216 return pipefilter(s, cmd)
216 return pipefilter(s, cmd)
217
217
218 def binary(s):
218 def binary(s):
219 """return true if a string is binary data"""
219 """return true if a string is binary data"""
220 return bool(s and '\0' in s)
220 return bool(s and '\0' in s)
221
221
222 def unique(g):
223 """return the uniq elements of iterable g"""
224 return dict.fromkeys(g).keys()
225
226 def sort(l):
222 def sort(l):
227 if not isinstance(l, list):
223 if not isinstance(l, list):
228 l = list(l)
224 l = list(l)
229 l.sort()
225 l.sort()
230 return l
226 return l
231
227
232 def increasingchunks(source, min=1024, max=65536):
228 def increasingchunks(source, min=1024, max=65536):
233 '''return no less than min bytes per chunk while data remains,
229 '''return no less than min bytes per chunk while data remains,
234 doubling min after each chunk until it reaches max'''
230 doubling min after each chunk until it reaches max'''
235 def log2(x):
231 def log2(x):
236 if not x:
232 if not x:
237 return 0
233 return 0
238 i = 0
234 i = 0
239 while x:
235 while x:
240 x >>= 1
236 x >>= 1
241 i += 1
237 i += 1
242 return i - 1
238 return i - 1
243
239
244 buf = []
240 buf = []
245 blen = 0
241 blen = 0
246 for chunk in source:
242 for chunk in source:
247 buf.append(chunk)
243 buf.append(chunk)
248 blen += len(chunk)
244 blen += len(chunk)
249 if blen >= min:
245 if blen >= min:
250 if min < max:
246 if min < max:
251 min = min << 1
247 min = min << 1
252 nmin = 1 << log2(blen)
248 nmin = 1 << log2(blen)
253 if nmin > min:
249 if nmin > min:
254 min = nmin
250 min = nmin
255 if min > max:
251 if min > max:
256 min = max
252 min = max
257 yield ''.join(buf)
253 yield ''.join(buf)
258 blen = 0
254 blen = 0
259 buf = []
255 buf = []
260 if buf:
256 if buf:
261 yield ''.join(buf)
257 yield ''.join(buf)
262
258
263 Abort = error.Abort
259 Abort = error.Abort
264
260
265 def always(fn): return True
261 def always(fn): return True
266 def never(fn): return False
262 def never(fn): return False
267
263
268 def patkind(name, default):
264 def patkind(name, default):
269 """Split a string into an optional pattern kind prefix and the
265 """Split a string into an optional pattern kind prefix and the
270 actual pattern."""
266 actual pattern."""
271 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
267 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
272 if name.startswith(prefix + ':'): return name.split(':', 1)
268 if name.startswith(prefix + ':'): return name.split(':', 1)
273 return default, name
269 return default, name
274
270
275 def globre(pat, head='^', tail='$'):
271 def globre(pat, head='^', tail='$'):
276 "convert a glob pattern into a regexp"
272 "convert a glob pattern into a regexp"
277 i, n = 0, len(pat)
273 i, n = 0, len(pat)
278 res = ''
274 res = ''
279 group = 0
275 group = 0
280 def peek(): return i < n and pat[i]
276 def peek(): return i < n and pat[i]
281 while i < n:
277 while i < n:
282 c = pat[i]
278 c = pat[i]
283 i = i+1
279 i = i+1
284 if c == '*':
280 if c == '*':
285 if peek() == '*':
281 if peek() == '*':
286 i += 1
282 i += 1
287 res += '.*'
283 res += '.*'
288 else:
284 else:
289 res += '[^/]*'
285 res += '[^/]*'
290 elif c == '?':
286 elif c == '?':
291 res += '.'
287 res += '.'
292 elif c == '[':
288 elif c == '[':
293 j = i
289 j = i
294 if j < n and pat[j] in '!]':
290 if j < n and pat[j] in '!]':
295 j += 1
291 j += 1
296 while j < n and pat[j] != ']':
292 while j < n and pat[j] != ']':
297 j += 1
293 j += 1
298 if j >= n:
294 if j >= n:
299 res += '\\['
295 res += '\\['
300 else:
296 else:
301 stuff = pat[i:j].replace('\\','\\\\')
297 stuff = pat[i:j].replace('\\','\\\\')
302 i = j + 1
298 i = j + 1
303 if stuff[0] == '!':
299 if stuff[0] == '!':
304 stuff = '^' + stuff[1:]
300 stuff = '^' + stuff[1:]
305 elif stuff[0] == '^':
301 elif stuff[0] == '^':
306 stuff = '\\' + stuff
302 stuff = '\\' + stuff
307 res = '%s[%s]' % (res, stuff)
303 res = '%s[%s]' % (res, stuff)
308 elif c == '{':
304 elif c == '{':
309 group += 1
305 group += 1
310 res += '(?:'
306 res += '(?:'
311 elif c == '}' and group:
307 elif c == '}' and group:
312 res += ')'
308 res += ')'
313 group -= 1
309 group -= 1
314 elif c == ',' and group:
310 elif c == ',' and group:
315 res += '|'
311 res += '|'
316 elif c == '\\':
312 elif c == '\\':
317 p = peek()
313 p = peek()
318 if p:
314 if p:
319 i += 1
315 i += 1
320 res += re.escape(p)
316 res += re.escape(p)
321 else:
317 else:
322 res += re.escape(c)
318 res += re.escape(c)
323 else:
319 else:
324 res += re.escape(c)
320 res += re.escape(c)
325 return head + res + tail
321 return head + res + tail
326
322
327 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
323 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
328
324
329 def pathto(root, n1, n2):
325 def pathto(root, n1, n2):
330 '''return the relative path from one place to another.
326 '''return the relative path from one place to another.
331 root should use os.sep to separate directories
327 root should use os.sep to separate directories
332 n1 should use os.sep to separate directories
328 n1 should use os.sep to separate directories
333 n2 should use "/" to separate directories
329 n2 should use "/" to separate directories
334 returns an os.sep-separated path.
330 returns an os.sep-separated path.
335
331
336 If n1 is a relative path, it's assumed it's
332 If n1 is a relative path, it's assumed it's
337 relative to root.
333 relative to root.
338 n2 should always be relative to root.
334 n2 should always be relative to root.
339 '''
335 '''
340 if not n1: return localpath(n2)
336 if not n1: return localpath(n2)
341 if os.path.isabs(n1):
337 if os.path.isabs(n1):
342 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
338 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
343 return os.path.join(root, localpath(n2))
339 return os.path.join(root, localpath(n2))
344 n2 = '/'.join((pconvert(root), n2))
340 n2 = '/'.join((pconvert(root), n2))
345 a, b = splitpath(n1), n2.split('/')
341 a, b = splitpath(n1), n2.split('/')
346 a.reverse()
342 a.reverse()
347 b.reverse()
343 b.reverse()
348 while a and b and a[-1] == b[-1]:
344 while a and b and a[-1] == b[-1]:
349 a.pop()
345 a.pop()
350 b.pop()
346 b.pop()
351 b.reverse()
347 b.reverse()
352 return os.sep.join((['..'] * len(a)) + b) or '.'
348 return os.sep.join((['..'] * len(a)) + b) or '.'
353
349
354 def canonpath(root, cwd, myname):
350 def canonpath(root, cwd, myname):
355 """return the canonical path of myname, given cwd and root"""
351 """return the canonical path of myname, given cwd and root"""
356 if root == os.sep:
352 if root == os.sep:
357 rootsep = os.sep
353 rootsep = os.sep
358 elif endswithsep(root):
354 elif endswithsep(root):
359 rootsep = root
355 rootsep = root
360 else:
356 else:
361 rootsep = root + os.sep
357 rootsep = root + os.sep
362 name = myname
358 name = myname
363 if not os.path.isabs(name):
359 if not os.path.isabs(name):
364 name = os.path.join(root, cwd, name)
360 name = os.path.join(root, cwd, name)
365 name = os.path.normpath(name)
361 name = os.path.normpath(name)
366 audit_path = path_auditor(root)
362 audit_path = path_auditor(root)
367 if name != rootsep and name.startswith(rootsep):
363 if name != rootsep and name.startswith(rootsep):
368 name = name[len(rootsep):]
364 name = name[len(rootsep):]
369 audit_path(name)
365 audit_path(name)
370 return pconvert(name)
366 return pconvert(name)
371 elif name == root:
367 elif name == root:
372 return ''
368 return ''
373 else:
369 else:
374 # Determine whether `name' is in the hierarchy at or beneath `root',
370 # Determine whether `name' is in the hierarchy at or beneath `root',
375 # by iterating name=dirname(name) until that causes no change (can't
371 # by iterating name=dirname(name) until that causes no change (can't
376 # check name == '/', because that doesn't work on windows). For each
372 # check name == '/', because that doesn't work on windows). For each
377 # `name', compare dev/inode numbers. If they match, the list `rel'
373 # `name', compare dev/inode numbers. If they match, the list `rel'
378 # holds the reversed list of components making up the relative file
374 # holds the reversed list of components making up the relative file
379 # name we want.
375 # name we want.
380 root_st = os.stat(root)
376 root_st = os.stat(root)
381 rel = []
377 rel = []
382 while True:
378 while True:
383 try:
379 try:
384 name_st = os.stat(name)
380 name_st = os.stat(name)
385 except OSError:
381 except OSError:
386 break
382 break
387 if samestat(name_st, root_st):
383 if samestat(name_st, root_st):
388 if not rel:
384 if not rel:
389 # name was actually the same as root (maybe a symlink)
385 # name was actually the same as root (maybe a symlink)
390 return ''
386 return ''
391 rel.reverse()
387 rel.reverse()
392 name = os.path.join(*rel)
388 name = os.path.join(*rel)
393 audit_path(name)
389 audit_path(name)
394 return pconvert(name)
390 return pconvert(name)
395 dirname, basename = os.path.split(name)
391 dirname, basename = os.path.split(name)
396 rel.append(basename)
392 rel.append(basename)
397 if dirname == name:
393 if dirname == name:
398 break
394 break
399 name = dirname
395 name = dirname
400
396
401 raise Abort('%s not under root' % myname)
397 raise Abort('%s not under root' % myname)
402
398
403 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
399 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
404 """build a function to match a set of file patterns
400 """build a function to match a set of file patterns
405
401
406 arguments:
402 arguments:
407 canonroot - the canonical root of the tree you're matching against
403 canonroot - the canonical root of the tree you're matching against
408 cwd - the current working directory, if relevant
404 cwd - the current working directory, if relevant
409 names - patterns to find
405 names - patterns to find
410 inc - patterns to include
406 inc - patterns to include
411 exc - patterns to exclude
407 exc - patterns to exclude
412 dflt_pat - if a pattern in names has no explicit type, assume this one
408 dflt_pat - if a pattern in names has no explicit type, assume this one
413 src - where these patterns came from (e.g. .hgignore)
409 src - where these patterns came from (e.g. .hgignore)
414
410
415 a pattern is one of:
411 a pattern is one of:
416 'glob:<glob>' - a glob relative to cwd
412 'glob:<glob>' - a glob relative to cwd
417 're:<regexp>' - a regular expression
413 're:<regexp>' - a regular expression
418 'path:<path>' - a path relative to canonroot
414 'path:<path>' - a path relative to canonroot
419 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
415 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
420 'relpath:<path>' - a path relative to cwd
416 'relpath:<path>' - a path relative to cwd
421 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
417 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
422 '<something>' - one of the cases above, selected by the dflt_pat argument
418 '<something>' - one of the cases above, selected by the dflt_pat argument
423
419
424 returns:
420 returns:
425 a 3-tuple containing
421 a 3-tuple containing
426 - list of roots (places where one should start a recursive walk of the fs);
422 - list of roots (places where one should start a recursive walk of the fs);
427 this often matches the explicit non-pattern names passed in, but also
423 this often matches the explicit non-pattern names passed in, but also
428 includes the initial part of glob: patterns that has no glob characters
424 includes the initial part of glob: patterns that has no glob characters
429 - a bool match(filename) function
425 - a bool match(filename) function
430 - a bool indicating if any patterns were passed in
426 - a bool indicating if any patterns were passed in
431 """
427 """
432
428
433 # a common case: no patterns at all
429 # a common case: no patterns at all
434 if not names and not inc and not exc:
430 if not names and not inc and not exc:
435 return [], always, False
431 return [], always, False
436
432
437 def contains_glob(name):
433 def contains_glob(name):
438 for c in name:
434 for c in name:
439 if c in _globchars: return True
435 if c in _globchars: return True
440 return False
436 return False
441
437
442 def regex(kind, name, tail):
438 def regex(kind, name, tail):
443 '''convert a pattern into a regular expression'''
439 '''convert a pattern into a regular expression'''
444 if not name:
440 if not name:
445 return ''
441 return ''
446 if kind == 're':
442 if kind == 're':
447 return name
443 return name
448 elif kind == 'path':
444 elif kind == 'path':
449 return '^' + re.escape(name) + '(?:/|$)'
445 return '^' + re.escape(name) + '(?:/|$)'
450 elif kind == 'relglob':
446 elif kind == 'relglob':
451 return globre(name, '(?:|.*/)', tail)
447 return globre(name, '(?:|.*/)', tail)
452 elif kind == 'relpath':
448 elif kind == 'relpath':
453 return re.escape(name) + '(?:/|$)'
449 return re.escape(name) + '(?:/|$)'
454 elif kind == 'relre':
450 elif kind == 'relre':
455 if name.startswith('^'):
451 if name.startswith('^'):
456 return name
452 return name
457 return '.*' + name
453 return '.*' + name
458 return globre(name, '', tail)
454 return globre(name, '', tail)
459
455
460 def matchfn(pats, tail):
456 def matchfn(pats, tail):
461 """build a matching function from a set of patterns"""
457 """build a matching function from a set of patterns"""
462 if not pats:
458 if not pats:
463 return
459 return
464 try:
460 try:
465 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
461 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
466 if len(pat) > 20000:
462 if len(pat) > 20000:
467 raise OverflowError()
463 raise OverflowError()
468 return re.compile(pat).match
464 return re.compile(pat).match
469 except OverflowError:
465 except OverflowError:
470 # We're using a Python with a tiny regex engine and we
466 # We're using a Python with a tiny regex engine and we
471 # made it explode, so we'll divide the pattern list in two
467 # made it explode, so we'll divide the pattern list in two
472 # until it works
468 # until it works
473 l = len(pats)
469 l = len(pats)
474 if l < 2:
470 if l < 2:
475 raise
471 raise
476 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
472 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
477 return lambda s: a(s) or b(s)
473 return lambda s: a(s) or b(s)
478 except re.error:
474 except re.error:
479 for k, p in pats:
475 for k, p in pats:
480 try:
476 try:
481 re.compile('(?:%s)' % regex(k, p, tail))
477 re.compile('(?:%s)' % regex(k, p, tail))
482 except re.error:
478 except re.error:
483 if src:
479 if src:
484 raise Abort("%s: invalid pattern (%s): %s" %
480 raise Abort("%s: invalid pattern (%s): %s" %
485 (src, k, p))
481 (src, k, p))
486 else:
482 else:
487 raise Abort("invalid pattern (%s): %s" % (k, p))
483 raise Abort("invalid pattern (%s): %s" % (k, p))
488 raise Abort("invalid pattern")
484 raise Abort("invalid pattern")
489
485
490 def globprefix(pat):
486 def globprefix(pat):
491 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
487 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
492 root = []
488 root = []
493 for p in pat.split('/'):
489 for p in pat.split('/'):
494 if contains_glob(p): break
490 if contains_glob(p): break
495 root.append(p)
491 root.append(p)
496 return '/'.join(root) or '.'
492 return '/'.join(root) or '.'
497
493
498 def normalizepats(names, default):
494 def normalizepats(names, default):
499 pats = []
495 pats = []
500 roots = []
496 roots = []
501 anypats = False
497 anypats = False
502 for kind, name in [patkind(p, default) for p in names]:
498 for kind, name in [patkind(p, default) for p in names]:
503 if kind in ('glob', 'relpath'):
499 if kind in ('glob', 'relpath'):
504 name = canonpath(canonroot, cwd, name)
500 name = canonpath(canonroot, cwd, name)
505 elif kind in ('relglob', 'path'):
501 elif kind in ('relglob', 'path'):
506 name = normpath(name)
502 name = normpath(name)
507
503
508 pats.append((kind, name))
504 pats.append((kind, name))
509
505
510 if kind in ('glob', 're', 'relglob', 'relre'):
506 if kind in ('glob', 're', 'relglob', 'relre'):
511 anypats = True
507 anypats = True
512
508
513 if kind == 'glob':
509 if kind == 'glob':
514 root = globprefix(name)
510 root = globprefix(name)
515 roots.append(root)
511 roots.append(root)
516 elif kind in ('relpath', 'path'):
512 elif kind in ('relpath', 'path'):
517 roots.append(name or '.')
513 roots.append(name or '.')
518 elif kind == 'relglob':
514 elif kind == 'relglob':
519 roots.append('.')
515 roots.append('.')
520 return roots, pats, anypats
516 return roots, pats, anypats
521
517
522 roots, pats, anypats = normalizepats(names, dflt_pat)
518 roots, pats, anypats = normalizepats(names, dflt_pat)
523
519
524 patmatch = matchfn(pats, '$') or always
520 patmatch = matchfn(pats, '$') or always
525 incmatch = always
521 incmatch = always
526 if inc:
522 if inc:
527 dummy, inckinds, dummy = normalizepats(inc, 'glob')
523 dummy, inckinds, dummy = normalizepats(inc, 'glob')
528 incmatch = matchfn(inckinds, '(?:/|$)')
524 incmatch = matchfn(inckinds, '(?:/|$)')
529 excmatch = never
525 excmatch = never
530 if exc:
526 if exc:
531 dummy, exckinds, dummy = normalizepats(exc, 'glob')
527 dummy, exckinds, dummy = normalizepats(exc, 'glob')
532 excmatch = matchfn(exckinds, '(?:/|$)')
528 excmatch = matchfn(exckinds, '(?:/|$)')
533
529
534 if not names and inc and not exc:
530 if not names and inc and not exc:
535 # common case: hgignore patterns
531 # common case: hgignore patterns
536 match = incmatch
532 match = incmatch
537 else:
533 else:
538 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
534 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
539
535
540 return (roots, match, (inc or exc or anypats) and True)
536 return (roots, match, (inc or exc or anypats) and True)
541
537
542 _hgexecutable = None
538 _hgexecutable = None
543
539
544 def main_is_frozen():
540 def main_is_frozen():
545 """return True if we are a frozen executable.
541 """return True if we are a frozen executable.
546
542
547 The code supports py2exe (most common, Windows only) and tools/freeze
543 The code supports py2exe (most common, Windows only) and tools/freeze
548 (portable, not much used).
544 (portable, not much used).
549 """
545 """
550 return (hasattr(sys, "frozen") or # new py2exe
546 return (hasattr(sys, "frozen") or # new py2exe
551 hasattr(sys, "importers") or # old py2exe
547 hasattr(sys, "importers") or # old py2exe
552 imp.is_frozen("__main__")) # tools/freeze
548 imp.is_frozen("__main__")) # tools/freeze
553
549
554 def hgexecutable():
550 def hgexecutable():
555 """return location of the 'hg' executable.
551 """return location of the 'hg' executable.
556
552
557 Defaults to $HG or 'hg' in the search path.
553 Defaults to $HG or 'hg' in the search path.
558 """
554 """
559 if _hgexecutable is None:
555 if _hgexecutable is None:
560 hg = os.environ.get('HG')
556 hg = os.environ.get('HG')
561 if hg:
557 if hg:
562 set_hgexecutable(hg)
558 set_hgexecutable(hg)
563 elif main_is_frozen():
559 elif main_is_frozen():
564 set_hgexecutable(sys.executable)
560 set_hgexecutable(sys.executable)
565 else:
561 else:
566 set_hgexecutable(find_exe('hg') or 'hg')
562 set_hgexecutable(find_exe('hg') or 'hg')
567 return _hgexecutable
563 return _hgexecutable
568
564
569 def set_hgexecutable(path):
565 def set_hgexecutable(path):
570 """set location of the 'hg' executable"""
566 """set location of the 'hg' executable"""
571 global _hgexecutable
567 global _hgexecutable
572 _hgexecutable = path
568 _hgexecutable = path
573
569
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    NOTE: the mutable default for environ is safe only because this
    function never mutates it.
    '''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        # equality (not identity) comparison kept from the original code
        if val == True:
            return '1'
        return str(val)

    # remember every environment variable we are about to override so the
    # finally block can restore it
    oldenv = dict((k, os.environ.get(k)) for k in environ)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        # cmd.exe needs the whole command line quoted as one unit
        cmd = '"%s"' % cmd
    try:
        for k in environ:
            os.environ[k] = py2shell(environ[k])
        os.environ['HG'] = hgexecutable()
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if sys.platform == 'OpenVMS' and rc & 1:
            # on OpenVMS an odd status means success
            rc = 0
        if rc and onerr:
            basename = os.path.basename(origcmd.split(None, 1)[0])
            errmsg = '%s %s' % (basename, explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                # a ui-like object: warn and fall through to return rc
                onerr.warn(errmsg + '\n')
            except AttributeError:
                # an exception class: raise it with the message
                raise onerr(errmsg)
        return rc
    finally:
        # restore environment and working directory in all cases
        for k, v in oldenv.items():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
623
619
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the call itself raised,
            # i.e. the arguments did not match the signature; deeper
            # tracebacks are genuine TypeErrors from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
635
631
# os.path.lexists is not available on python2.3
def lexists(filename):
    """Return True if a file with this name exists, without following
    symlinks (a dangling symlink counts as existing).

    Backport of os.path.lexists() for Python 2.3.
    """
    # The original used a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit; os.lstat only raises OSError for a
    # missing or inaccessible path, so catch exactly that.
    try:
        os.lstat(filename)
    except OSError:
        return False
    return True
644
640
def rename(src, dst):
    """Rename src to dst, replacing dst if it already exists."""
    try:
        os.rename(src, dst)
    except OSError:  # FIXME: check err (EEXIST ?)
        # On Windows, renaming over an existing file is not allowed, so
        # the destination must be removed first.  If the destination is
        # open, unlink merely schedules deletion, while rename works
        # immediately even on open files -- so move the destination out
        # of the way under a temporary name, delete that, and retry.
        spare = dst + "-force-rename"
        os.rename(dst, spare)
        os.unlink(spare)
        os.rename(src, dst)
660
656
def unlink(f):
    """Remove file f, then prune any newly-empty parent directories."""
    os.unlink(f)
    # removedirs() climbs upwards deleting each empty ancestor; it stops
    # with OSError at the first non-empty one, which we simply ignore
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
669
665
def copyfile(src, dest):
    """Copy a single file, preserving mode and atime/mtime.

    Symbolic links are recreated as links (the link itself is copied,
    not its target).  Raises Abort if copying data or metadata fails.
    """
    if os.path.islink(src):
        # remove any existing destination first; the original used a
        # bare "except:" here, which also hid KeyboardInterrupt -- only
        # OS-level failures (e.g. dest not existing) should be ignored
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
684
680
def copyfiles(src, dst, hardlink=None):
    """Copy a file or directory tree, hardlinking files when possible.

    When hardlink is None it is enabled iff src and dst's parent live
    on the same device (hardlinks cannot cross filesystems).
    """
    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for entry, kind in osutil.listdir(src):
            copyfiles(os.path.join(src, entry),
                      os.path.join(dst, entry), hardlink)
    elif hardlink:
        try:
            os_link(src, dst)
        except (IOError, OSError):
            # linking failed: fall back to copying, and stop attempting
            # links for the remainder of this call
            hardlink = False
            shutil.copy(src, dst)
    else:
        shutil.copy(src, dst)
707
703
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository'''

    def __init__(self, root):
        # caches of already-vetted paths/directories keep repeated
        # audits cheap
        self.audited = set()
        self.auditeddir = set()
        self.root = root

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # reject drive-absolute paths, top-level .hg and ".." components
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))

        def check(prefix):
            # audit a single ancestor directory of path
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError as err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR,
                                     errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))

        # walk the ancestors from the deepest upwards, stopping at the
        # first directory that has already been vetted
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we
        # don't want to add "foo/bar/baz" before checking for "foo/.hg"
        self.auditeddir.update(prefixes)
770
766
def nlinks(pathname):
    """Return the hardlink count of pathname (symlinks not followed)."""
    st = os.lstat(pathname)
    return st.st_nlink
774
770
# Alias os.link where the platform provides it; otherwise install a stub
# that always fails with a clear message.
os_link = getattr(os, 'link', None)
if os_link is None:
    def os_link(src, dst):
        # hard links are unavailable on this platform
        raise OSError(0, _("Hardlinks not supported"))
780
776
def lookup_reg(key, name=None, scope=None):
    """Look up a key in the Windows registry.

    This is the non-Windows stub: there is no registry here, so the
    answer is always None.
    """
    return None
783
779
# Pull in the platform-specific implementations; on Windows we also need
# to expand implicit globs, since cmd.exe does not do it for us.
if os.name == 'nt':
    from windows import *

    def expand_glob(pats):
        '''On Windows, expand the implicit globs in a list of patterns'''
        expanded = []
        for pat in pats:
            kind, name = patkind(pat, None)
            if kind is None:
                matches = glob.glob(name)
                if matches:
                    expanded.extend(matches)
                    continue
            # no glob match, or an explicit pattern kind: keep verbatim
            expanded.append(pat)
        return expanded
else:
    from posix import *
801
797
def makelock(info, pathname):
    """Create a lock at pathname whose payload is info.

    Preferred form is a symlink (atomic, payload in the link target);
    platforms without symlinks fall back to an exclusively-created
    regular file holding the payload.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # any other symlink failure: fall through to the plain file
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
814
810
def readlock(pathname):
    """Return the payload of a lock created by makelock()."""
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # both mean we should read a regular lock file instead
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()
824
820
def fstat(fp):
    """Return stat info for a file object, even one lacking fileno()."""
    try:
        fd = fp.fileno()
    except AttributeError:
        # not a real file object: stat it by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
831
827
832 # File system features
828 # File system features
833
829
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        # same stat result means the case-flipped name reaches the same
        # file: the filesystem folds case
        if s2 == s1:
            return False
        return True
    except OSError:
        # the flipped name does not exist: case-sensitive.  The original
        # used a bare "except:", which also hid KeyboardInterrupt; only
        # a failed stat should mean "sensitive".
        return True
853
849
# cache of directory listings used by fspath(), keyed by directory name
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # BUG(fixed): the original called seps.replace('\\', '\\\\') and
    # discarded the result -- a no-op.  The character class below is
    # well-formed without escaping, so the dead statement is dropped.
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the entry that matches this component case-insensitively
        lpart = part.lower()
        for n in contents:
            if n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
900
896
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """
    # VFAT on some Linux versions can flip the mode bit but loses it
    # again after a remount; this is frequently detectable because new
    # files come into existence with their exec bit already set.
    EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    try:
        fh, fn = tempfile.mkstemp("", "", path)
        try:
            os.close(fh)
            mode = os.stat(fn).st_mode & 0o777
            born_exec = mode & EXECFLAGS
            os.chmod(fn, mode ^ EXECFLAGS)
            cannot_flip = (os.stat(fn).st_mode & 0o777) == mode
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (born_exec or cannot_flip)
927
923
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy here: symlink creation fails if the name exists
    name = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", name)
        os.unlink(name)
    except (OSError, AttributeError):
        # creation failed, or os has no symlink at all
        return False
    return True
939
935
def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    # Windows mangles line endings, so binary mode is the safe default there
    return os.name == 'nt'
943
939
def endswithsep(path):
    '''Check whether path ends with os.sep or os.altsep.

    Note: may return a falsy non-bool (None) on platforms without an
    altsep, exactly like the expression it replaces.
    '''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
947
943
def splitpath(path):
    '''Split path on os.sep -- and only os.sep.  os.altsep is
    deliberately ignored because this is meant as a plain spelling of
    "path.split(os.sep)".  Run os.path.normpath() on the argument first
    if that matters.'''
    return path.split(os.sep)
955
951
def gui():
    '''Are we running in a GUI?'''
    # nt and mac always have one; elsewhere X11 is detected via $DISPLAY
    return os.name in ('nt', 'mac') or os.environ.get("DISPLAY")
959
955
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirpart, basename = os.path.split(name)
    fd, tempname = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirpart)
    os.close(fd)
    # mkstemp creates files as 0600, which is usually not what we want:
    # mirror the original file's mode, else obey createmode/umask
    try:
        mode = os.lstat(name).st_mode & 0o777
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        mode = createmode
        if mode is None:
            mode = ~umask
        mode &= 0o666
    os.chmod(tempname, mode)
    if emptyok:
        return tempname
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy over: the empty temp file will do
                return tempname
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(tempname, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # any failure at all: drop the temp file, then re-raise
        try: os.unlink(tempname)
        except: pass
        raise
    return tempname
1007
1003
class atomictempfile(posixfile):
    """file-like object that atomically updates a file

    All writes go to a temporary copy of the original file; calling
    rename() closes the copy and moves it over the original name,
    publishing every change in one step.  If the object is dropped
    without rename(), the temporary copy is discarded.
    """
    def __init__(self, name, mode, createmode):
        self.__name = name
        # emptyok: no point copying old contents if mode truncates anyway
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        posixfile.__init__(self, self.temp, mode)

    def rename(self):
        if not self.closed:
            posixfile.close(self)
            rename(self.temp, localpath(self.__name))

    def __del__(self):
        # never renamed: throw the temporary copy away
        if not self.closed:
            try:
                os.unlink(self.temp)
            except: pass
            posixfile.close(self)
1032
1028
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do
            return
        if err.errno != errno.ENOENT:
            raise
    # missing parent: create ancestors first, then retry this level
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
1048
1044
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        # audit_path rejects paths escaping the base or entering .hg
        if audit:
            self.audit_path = path_auditor(base)
        else:
            self.audit_path = always
        self.createmode = None

    def __getattr__(self, name):
        # lazily probe symlink support the first time it is asked for
        if name == '_can_symlink':
            self._can_symlink = checklink(self.base)
            return self._can_symlink
        raise AttributeError(name)

    def _fixfilemode(self, name):
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.audit_path(path)
        fname = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            # writing: make sure the directory exists and break any
            # hardlinks so we do not clobber a shared copy (COW)
            try:
                nlink = nlinks(fname)
            except OSError:
                nlink = 0
            dirname = os.path.dirname(fname)
            if not os.path.isdir(dirname):
                makedirs(dirname, self.createmode)
            if atomictemp:
                return atomictempfile(fname, mode, self.createmode)
            if nlink > 1:
                rename(mktempcopy(fname), fname)
        fp = posixfile(fname, mode)
        if nlink == 0:
            # freshly created file: apply the configured mode
            self._fixfilemode(fname)
        return fp

    def symlink(self, src, dst):
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the target as the file contents
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
1122
1118
class chunkbuffer(object):
    """Present an iterator over arbitrary-size chunks as a file-like
    object supporting read(l)."""

    def __init__(self, in_iter):
        """in_iter yields the input chunks; roughly targetsize bytes of
        lookahead are buffered internally."""
        self.iter = iter(in_iter)
        self.buf = ''
        self.targetsize = 2 ** 16

    def read(self, l):
        """Read l bytes of data from the iterator of chunks of data.
        Returns less than l bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # refill: pull chunks until at least max(l, targetsize)
            # bytes are on hand, or the iterator is exhausted
            goal = max(l, self.targetsize)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            have = len(self.buf)
            for chunk in self.iter:
                collector.write(chunk)
                have += len(chunk)
                if have >= goal:
                    break
            if have < goal:
                # iterator ran dry; mark it falsy so we never refill
                self.iter = False
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            data, self.buf = str(self.buf), ''
        else:
            # buffer() keeps the unread tail without copying it
            data, self.buf = self.buf[:l], buffer(self.buf, l)
        return data
1156
1152
def filechunkiter(f, size=65536, limit=None):
    """Create a generator yielding the data in file object f in chunks
    of up to size bytes (default 65536), stopping after limit bytes when
    limit is given (default is to read all data).  Chunks may be shorter
    than size at end of file or when f is a socket-like object that
    returns short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            want = size
        else:
            want = min(limit, size)
        data = want and f.read(want)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1173
1169
def makedate():
    """Return the current time as a (unixtime, tzoffset) tuple, where
    tzoffset is the local zone's distance west of UTC in seconds."""
    now = time.localtime()
    # now[8] is tm_isdst: 1 when daylight saving time is in effect
    if now[8] == 1 and time.daylight:
        offset = time.altzone
    else:
        offset = time.timezone
    return time.mktime(now), offset
1181
1177
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch and offset is the time zone's
    number of seconds away from UTC.  The %1 and %2 escapes in format
    expand to the signed hour and minute parts of the offset.  With no
    date, the current time (makedate()) is used."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        # a positive offset is west of UTC, hence the "-" zone string
        sign = tz > 0 and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(t) - tz))
1195
1191
def shortdate(date=None):
    """Render a (timestamp, tzoffset) tuple as an ISO 8601 date."""
    # delegate to datestr with a date-only format
    return datestr(date, format='%Y-%m-%d')
1199
1195
def strdate(string, format, defaults=[]):
    """Parse a localized time string into a (unixtime, offset) tuple.

    ValueError is raised when the string cannot be parsed.  defaults
    maps strptime field groups to fallback values that are appended
    when the format lacks them (see parsedate)."""

    def zoneoffset(s):
        # Recognize a trailing numeric (+HHMM/-HHMM) or symbolic
        # (GMT/UTC) zone; None means "interpret in the local zone".
        tz = s.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = tz[0] == "+" and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = zoneoffset(string), string
    if offset is not None:
        # drop the zone token so strptime does not see it
        date = " ".join(string.split()[:-1])

    # append missing fields from defaults
    for part in defaults:
        if not [True for p in part if ("%" + p) in format]:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: derive the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1235
1231
def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    Raises Abort when no format matches or the result is out of range."""
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # build fallback values for fields a format may omit
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part in defaults:
                continue
            if part[0] in "HMS":
                defaults[part] = "00"
            else:
                defaults[part] = datestr(now, "%" + part[0])

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1281
1277
def matchdate(date):
    """Return a predicate over timestamps for a date match specifier.

    Specifiers:

    '{date}'      match the given date to the accuracy provided
    '<{date}'     on or before the given date
    '>{date}'     on or after the given date
    '-{days}'     within the last {days} days
    '{a} to {b}'  between a and b inclusive
    """

    def floor(date):
        # earliest timestamp the (possibly partial) date can mean
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def ceil(date):
        # latest timestamp: try the longest month lengths first
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    head = date[0]
    if head == "<":
        cutoff = ceil(date[1:])
        return lambda x: x <= cutoff
    if head == ">":
        cutoff = floor(date[1:])
        return lambda x: x >= cutoff
    if head == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        cutoff = makedate()[0] - days * 3600 * 24
        return lambda x: x >= cutoff
    if " to " in date:
        a, b = date.split(" to ")
    else:
        a = b = date
    start, stop = floor(a), ceil(b)
    return lambda x: start <= x <= stop
1331
1327
def shortuser(user):
    """Condense a user name or email address to a short handle.

    Drops any @domain suffix, anything before a '<', and anything after
    the first space or dot in what remains."""
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]
    # truncate at the first of the remaining separators
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1347
1343
def email(author):
    '''Extract the email address from an "Author <addr>" string;
    a bare address is returned unchanged.'''
    end = author.find('>')
    if end == -1:
        end = None
    # with no '<', find returns -1 and the slice starts at 0
    return author[author.find('<') + 1:end]
1353
1349
def ellipsis(text, maxlength=400):
    """Trim text to at most maxlength (default 400) characters,
    replacing the tail with '...' when trimming occurs."""
    if len(text) > maxlength:
        return "%s..." % (text[:maxlength - 3])
    return text
1360
1356
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def onerr(err):
        # only propagate errors for the root path itself
        if err.filename == path:
            raise err

    if followsym and hasattr(os.path, 'samestat'):
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst; return True when it was
            # not already present (guards against symlink cycles).
            samestat = os.path.samestat
            st = os.stat(dirname)
            for prior in dirlst:
                if samestat(st, prior):
                    return False
            dirlst.append(st)
            return True
    else:
        # platform cannot compare directories by identity
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=onerr):
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink separately so cycle
                        # detection sees the link target
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
1406
1402
1407 _rcpath = None
1403 _rcpath = None
1408
1404
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide configuration files first, then per-user ones
    paths = system_rcpath()
    paths.extend(user_rcpath())
    return [os.path.normpath(p) for p in paths]
1415
1411
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # the computed path is cached in the module-level _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = os_rcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        if os.path.isdir(p):
            # a directory entry expands to its *.rc files
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
1437
1433
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (threshold multiplier, divisor, format string) — first match wins,
    # so larger units with coarser precision are listed first
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for factor, divisor, fmt in units:
        if nbytes >= divisor * factor:
            return fmt % (nbytes / float(divisor))
    # negative counts fall through to the plain-bytes format
    return units[-1][2] % nbytes
1458
1454
def drop_scheme(scheme, path):
    """Strip a leading 'scheme:' prefix (and a following '//', if any)
    from path; other paths are returned unchanged."""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith('//'):
            path = path[2:]
    return path
1466
1462
def uirepr(s):
    """repr() a string for display, collapsing the doubled backslashes
    repr() produces (avoids ugly Windows path output)."""
    r = repr(s)
    return r.replace('\\\\', '\\')
1470
1466
def termwidth():
    """Best-effort terminal width in columns.

    Honors $COLUMNS first, then queries the tty via TIOCGWINSZ on
    stdout/stdin, then falls back to 80."""
    cols = os.environ.get('COLUMNS')
    if cols is not None:
        try:
            return int(cols)
        except ValueError:
            pass
    try:
        import termios, array, fcntl
        for dev in (sys.stdout, sys.stdin):
            try:
                fd = dev.fileno()
                if not os.isatty(fd):
                    continue
                winsz = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                # rows, cols, xpixel, ypixel as shorts; index 1 is cols
                return array.array('h', winsz)[1]
            except ValueError:
                pass
    except ImportError:
        # not a POSIX platform; fall through to the default
        pass
    return 80
1491
1487
def iterlines(iterator):
    """Yield each line from an iterable of multi-line text chunks."""
    for chunk in iterator:
        # splitlines() strips the line terminators
        for ln in chunk.splitlines():
            yield ln
@@ -1,245 +1,245 b''
1 # verify.py - repository integrity checking for Mercurial
1 # verify.py - repository integrity checking for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, short
8 from node import nullid, short
9 from i18n import _
9 from i18n import _
10 import revlog, util, error
10 import revlog, util, error
11
11
def verify(repo):
    """Check the integrity of repo while holding the repository lock.

    Delegates the actual checking to _verify; the lock is always
    released, even when _verify raises."""
    lock = repo.lock()
    try:
        return _verify(repo)
    finally:
        lock.release()
18
18
19 def _verify(repo):
19 def _verify(repo):
20 mflinkrevs = {}
20 mflinkrevs = {}
21 filelinkrevs = {}
21 filelinkrevs = {}
22 filenodes = {}
22 filenodes = {}
23 revisions = 0
23 revisions = 0
24 badrevs = {}
24 badrevs = {}
25 errors = [0]
25 errors = [0]
26 warnings = [0]
26 warnings = [0]
27 ui = repo.ui
27 ui = repo.ui
28 cl = repo.changelog
28 cl = repo.changelog
29 mf = repo.manifest
29 mf = repo.manifest
30
30
31 if not repo.cancopy():
31 if not repo.cancopy():
32 raise util.Abort(_("cannot verify bundle or remote repos"))
32 raise util.Abort(_("cannot verify bundle or remote repos"))
33
33
34 def err(linkrev, msg, filename=None):
34 def err(linkrev, msg, filename=None):
35 if linkrev != None:
35 if linkrev != None:
36 badrevs[linkrev] = True
36 badrevs[linkrev] = True
37 else:
37 else:
38 linkrev = '?'
38 linkrev = '?'
39 msg = "%s: %s" % (linkrev, msg)
39 msg = "%s: %s" % (linkrev, msg)
40 if filename:
40 if filename:
41 msg = "%s@%s" % (filename, msg)
41 msg = "%s@%s" % (filename, msg)
42 ui.warn(" " + msg + "\n")
42 ui.warn(" " + msg + "\n")
43 errors[0] += 1
43 errors[0] += 1
44
44
45 def exc(linkrev, msg, inst, filename=None):
45 def exc(linkrev, msg, inst, filename=None):
46 if isinstance(inst, KeyboardInterrupt):
46 if isinstance(inst, KeyboardInterrupt):
47 ui.warn(_("interrupted"))
47 ui.warn(_("interrupted"))
48 raise
48 raise
49 err(linkrev, "%s: %s" % (msg, inst), filename)
49 err(linkrev, "%s: %s" % (msg, inst), filename)
50
50
51 def warn(msg):
51 def warn(msg):
52 ui.warn(msg + "\n")
52 ui.warn(msg + "\n")
53 warnings[0] += 1
53 warnings[0] += 1
54
54
55 def checklog(obj, name):
55 def checklog(obj, name):
56 if not len(obj) and (havecl or havemf):
56 if not len(obj) and (havecl or havemf):
57 err(0, _("empty or missing %s") % name)
57 err(0, _("empty or missing %s") % name)
58 return
58 return
59
59
60 d = obj.checksize()
60 d = obj.checksize()
61 if d[0]:
61 if d[0]:
62 err(None, _("data length off by %d bytes") % d[0], name)
62 err(None, _("data length off by %d bytes") % d[0], name)
63 if d[1]:
63 if d[1]:
64 err(None, _("index contains %d extra bytes") % d[1], name)
64 err(None, _("index contains %d extra bytes") % d[1], name)
65
65
66 if obj.version != revlog.REVLOGV0:
66 if obj.version != revlog.REVLOGV0:
67 if not revlogv1:
67 if not revlogv1:
68 warn(_("warning: `%s' uses revlog format 1") % name)
68 warn(_("warning: `%s' uses revlog format 1") % name)
69 elif revlogv1:
69 elif revlogv1:
70 warn(_("warning: `%s' uses revlog format 0") % name)
70 warn(_("warning: `%s' uses revlog format 0") % name)
71
71
72 def checkentry(obj, i, node, seen, linkrevs, f):
72 def checkentry(obj, i, node, seen, linkrevs, f):
73 lr = obj.linkrev(obj.rev(node))
73 lr = obj.linkrev(obj.rev(node))
74 if lr < 0 or (havecl and lr not in linkrevs):
74 if lr < 0 or (havecl and lr not in linkrevs):
75 if lr < 0 or lr >= len(cl):
75 if lr < 0 or lr >= len(cl):
76 msg = _("rev %d points to nonexistent changeset %d")
76 msg = _("rev %d points to nonexistent changeset %d")
77 else:
77 else:
78 msg = _("rev %d points to unexpected changeset %d")
78 msg = _("rev %d points to unexpected changeset %d")
79 err(None, msg % (i, lr), f)
79 err(None, msg % (i, lr), f)
80 if linkrevs:
80 if linkrevs:
81 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
81 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
82 lr = None # can't be trusted
82 lr = None # can't be trusted
83
83
84 try:
84 try:
85 p1, p2 = obj.parents(node)
85 p1, p2 = obj.parents(node)
86 if p1 not in seen and p1 != nullid:
86 if p1 not in seen and p1 != nullid:
87 err(lr, _("unknown parent 1 %s of %s") %
87 err(lr, _("unknown parent 1 %s of %s") %
88 (short(p1), short(n)), f)
88 (short(p1), short(n)), f)
89 if p2 not in seen and p2 != nullid:
89 if p2 not in seen and p2 != nullid:
90 err(lr, _("unknown parent 2 %s of %s") %
90 err(lr, _("unknown parent 2 %s of %s") %
91 (short(p2), short(p1)), f)
91 (short(p2), short(p1)), f)
92 except Exception, inst:
92 except Exception, inst:
93 exc(lr, _("checking parents of %s") % short(node), inst, f)
93 exc(lr, _("checking parents of %s") % short(node), inst, f)
94
94
95 if node in seen:
95 if node in seen:
96 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
96 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
97 seen[n] = i
97 seen[n] = i
98 return lr
98 return lr
99
99
100 revlogv1 = cl.version != revlog.REVLOGV0
100 revlogv1 = cl.version != revlog.REVLOGV0
101 if ui.verbose or not revlogv1:
101 if ui.verbose or not revlogv1:
102 ui.status(_("repository uses revlog format %d\n") %
102 ui.status(_("repository uses revlog format %d\n") %
103 (revlogv1 and 1 or 0))
103 (revlogv1 and 1 or 0))
104
104
105 havecl = len(cl) > 0
105 havecl = len(cl) > 0
106 havemf = len(mf) > 0
106 havemf = len(mf) > 0
107
107
108 ui.status(_("checking changesets\n"))
108 ui.status(_("checking changesets\n"))
109 seen = {}
109 seen = {}
110 checklog(cl, "changelog")
110 checklog(cl, "changelog")
111 for i in repo:
111 for i in repo:
112 n = cl.node(i)
112 n = cl.node(i)
113 checkentry(cl, i, n, seen, [i], "changelog")
113 checkentry(cl, i, n, seen, [i], "changelog")
114
114
115 try:
115 try:
116 changes = cl.read(n)
116 changes = cl.read(n)
117 mflinkrevs.setdefault(changes[0], []).append(i)
117 mflinkrevs.setdefault(changes[0], []).append(i)
118 for f in changes[3]:
118 for f in changes[3]:
119 filelinkrevs.setdefault(f, []).append(i)
119 filelinkrevs.setdefault(f, []).append(i)
120 except Exception, inst:
120 except Exception, inst:
121 exc(i, _("unpacking changeset %s") % short(n), inst)
121 exc(i, _("unpacking changeset %s") % short(n), inst)
122
122
123 ui.status(_("checking manifests\n"))
123 ui.status(_("checking manifests\n"))
124 seen = {}
124 seen = {}
125 checklog(mf, "manifest")
125 checklog(mf, "manifest")
126 for i in mf:
126 for i in mf:
127 n = mf.node(i)
127 n = mf.node(i)
128 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
128 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
129 if n in mflinkrevs:
129 if n in mflinkrevs:
130 del mflinkrevs[n]
130 del mflinkrevs[n]
131
131
132 try:
132 try:
133 for f, fn in mf.readdelta(n).iteritems():
133 for f, fn in mf.readdelta(n).iteritems():
134 if not f:
134 if not f:
135 err(lr, _("file without name in manifest"))
135 err(lr, _("file without name in manifest"))
136 elif f != "/dev/null":
136 elif f != "/dev/null":
137 fns = filenodes.setdefault(f, {})
137 fns = filenodes.setdefault(f, {})
138 if fn not in fns:
138 if fn not in fns:
139 fns[fn] = i
139 fns[fn] = i
140 except Exception, inst:
140 except Exception, inst:
141 exc(lr, _("reading manifest delta %s") % short(n), inst)
141 exc(lr, _("reading manifest delta %s") % short(n), inst)
142
142
143 ui.status(_("crosschecking files in changesets and manifests\n"))
143 ui.status(_("crosschecking files in changesets and manifests\n"))
144
144
145 if havemf:
145 if havemf:
146 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
146 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
147 err(c, _("changeset refers to unknown manifest %s") % short(m))
147 err(c, _("changeset refers to unknown manifest %s") % short(m))
148 del mflinkrevs
148 del mflinkrevs
149
149
150 for f in util.sort(filelinkrevs):
150 for f in util.sort(filelinkrevs):
151 if f not in filenodes:
151 if f not in filenodes:
152 lr = filelinkrevs[f][0]
152 lr = filelinkrevs[f][0]
153 err(lr, _("in changeset but not in manifest"), f)
153 err(lr, _("in changeset but not in manifest"), f)
154
154
155 if havecl:
155 if havecl:
156 for f in util.sort(filenodes):
156 for f in util.sort(filenodes):
157 if f not in filelinkrevs:
157 if f not in filelinkrevs:
158 try:
158 try:
159 fl = repo.file(f)
159 fl = repo.file(f)
160 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
160 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
161 except:
161 except:
162 lr = None
162 lr = None
163 err(lr, _("in manifest but not in changeset"), f)
163 err(lr, _("in manifest but not in changeset"), f)
164
164
165 ui.status(_("checking files\n"))
165 ui.status(_("checking files\n"))
166
166
167 storefiles = {}
167 storefiles = {}
168 for f, f2, size in repo.store.datafiles():
168 for f, f2, size in repo.store.datafiles():
169 if not f:
169 if not f:
170 err(None, _("cannot decode filename '%s'") % f2)
170 err(None, _("cannot decode filename '%s'") % f2)
171 elif size > 0:
171 elif size > 0:
172 storefiles[f] = True
172 storefiles[f] = True
173
173
174 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
174 files = util.sort(set(filenodes.keys() + filelinkrevs.keys()))
175 for f in files:
175 for f in files:
176 lr = filelinkrevs[f][0]
176 lr = filelinkrevs[f][0]
177 try:
177 try:
178 fl = repo.file(f)
178 fl = repo.file(f)
179 except error.RevlogError, e:
179 except error.RevlogError, e:
180 err(lr, _("broken revlog! (%s)") % e, f)
180 err(lr, _("broken revlog! (%s)") % e, f)
181 continue
181 continue
182
182
183 for ff in fl.files():
183 for ff in fl.files():
184 try:
184 try:
185 del storefiles[ff]
185 del storefiles[ff]
186 except KeyError:
186 except KeyError:
187 err(lr, _("missing revlog!"), ff)
187 err(lr, _("missing revlog!"), ff)
188
188
189 checklog(fl, f)
189 checklog(fl, f)
190 seen = {}
190 seen = {}
191 for i in fl:
191 for i in fl:
192 revisions += 1
192 revisions += 1
193 n = fl.node(i)
193 n = fl.node(i)
194 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
194 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
195 if f in filenodes:
195 if f in filenodes:
196 if havemf and n not in filenodes[f]:
196 if havemf and n not in filenodes[f]:
197 err(lr, _("%s not in manifests") % (short(n)), f)
197 err(lr, _("%s not in manifests") % (short(n)), f)
198 else:
198 else:
199 del filenodes[f][n]
199 del filenodes[f][n]
200
200
201 # verify contents
201 # verify contents
202 try:
202 try:
203 t = fl.read(n)
203 t = fl.read(n)
204 rp = fl.renamed(n)
204 rp = fl.renamed(n)
205 if len(t) != fl.size(i):
205 if len(t) != fl.size(i):
206 if len(fl.revision(n)) != fl.size(i):
206 if len(fl.revision(n)) != fl.size(i):
207 err(lr, _("unpacked size is %s, %s expected") %
207 err(lr, _("unpacked size is %s, %s expected") %
208 (len(t), fl.size(i)), f)
208 (len(t), fl.size(i)), f)
209 except Exception, inst:
209 except Exception, inst:
210 exc(lr, _("unpacking %s") % short(n), inst, f)
210 exc(lr, _("unpacking %s") % short(n), inst, f)
211
211
212 # check renames
212 # check renames
213 try:
213 try:
214 if rp:
214 if rp:
215 fl2 = repo.file(rp[0])
215 fl2 = repo.file(rp[0])
216 if not len(fl2):
216 if not len(fl2):
217 err(lr, _("empty or missing copy source revlog %s:%s")
217 err(lr, _("empty or missing copy source revlog %s:%s")
218 % (rp[0], short(rp[1])), f)
218 % (rp[0], short(rp[1])), f)
219 elif rp[1] == nullid:
219 elif rp[1] == nullid:
220 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
220 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
221 % (f, lr, rp[0], short(rp[1])))
221 % (f, lr, rp[0], short(rp[1])))
222 else:
222 else:
223 fl2.rev(rp[1])
223 fl2.rev(rp[1])
224 except Exception, inst:
224 except Exception, inst:
225 exc(lr, _("checking rename of %s") % short(n), inst, f)
225 exc(lr, _("checking rename of %s") % short(n), inst, f)
226
226
227 # cross-check
227 # cross-check
228 if f in filenodes:
228 if f in filenodes:
229 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
229 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
230 for lr, node in util.sort(fns):
230 for lr, node in util.sort(fns):
231 err(lr, _("%s in manifests not found") % short(node), f)
231 err(lr, _("%s in manifests not found") % short(node), f)
232
232
233 for f in storefiles:
233 for f in storefiles:
234 warn(_("warning: orphan revlog '%s'") % f)
234 warn(_("warning: orphan revlog '%s'") % f)
235
235
236 ui.status(_("%d files, %d changesets, %d total revisions\n") %
236 ui.status(_("%d files, %d changesets, %d total revisions\n") %
237 (len(files), len(cl), revisions))
237 (len(files), len(cl), revisions))
238 if warnings[0]:
238 if warnings[0]:
239 ui.warn(_("%d warnings encountered!\n") % warnings[0])
239 ui.warn(_("%d warnings encountered!\n") % warnings[0])
240 if errors[0]:
240 if errors[0]:
241 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
241 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
242 if badrevs:
242 if badrevs:
243 ui.warn(_("(first damaged changeset appears to be %d)\n")
243 ui.warn(_("(first damaged changeset appears to be %d)\n")
244 % min(badrevs))
244 % min(badrevs))
245 return 1
245 return 1
General Comments 0
You need to be logged in to leave comments. Login now