diff --git a/CustomisedAudioRecording.py b/CustomisedAudioRecording.py
new file mode 100644
index 0000000..88b9c54
--- /dev/null
+++ b/CustomisedAudioRecording.py
@@ -0,0 +1,25 @@
+from SpecializeFileOperations import SpecializeFileOperations
+from Settings import Settings
+from Functions import Functions
+
+class CustomisedAudioRecording():
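+ """Organises one audio recording tree (identified by pathSuffix): refreshes
+ the "Current Recordings" staging folder on Google Drive, then mirrors new
+ files into the local and Google Drive backup paths read from Settings."""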
+ def __init__(self, pathSuffix):
+ self.LocalMainPath = Settings.GetValue(pathSuffix + "LocalMainPath")
+ self.GDriveMainPath = Settings.GetValue(pathSuffix + "GDriveMainPath")
+ self.CurrentRecordings = "Current Recordings"
+ self.GDriveRemovePath = Settings.GetValue(pathSuffix + "GDriveRemovePath")
+ self.LocalBackupPath = Settings.GetValue(pathSuffix + "LocalBackupPath")
+ self.GDriveBackupPath = Settings.GetValue(pathSuffix + "GDriveBackupPath")
+ self.SourceFiles = Functions.ScanAllFiles(self.LocalMainPath, False)
+
+ def PerformOperations(self):
+ organiseData = SpecializeFileOperations()
+ organiseSpecializeFolder = Functions.GetFolderPath(self.GDriveMainPath, self.CurrentRecordings)
+ if organiseSpecializeFolder:
+ organiseData.PerformSpecializeOperations(self.SourceFiles, self.LocalMainPath, organiseSpecializeFolder, self.GDriveRemovePath)
+ organiseData.ModifyFilesDataToOrganise(self.SourceFiles)
+ organiseData.OrganiseFiles(self.LocalMainPath, self.LocalBackupPath)
+ organiseData.OrganiseFiles(self.LocalMainPath, self.GDriveBackupPath)
+
+ def GetFiles(self):
+ return self.SourceFiles
\ No newline at end of file
diff --git a/CustomisedDatabase.py b/CustomisedDatabase.py
new file mode 100644
index 0000000..f9d9d62
--- /dev/null
+++ b/CustomisedDatabase.py
@@ -0,0 +1,72 @@
+import sqlite3
+
+from Settings import Settings
+
+class CustomisedDatabase():
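+ """sqlite3 wrapper for the VideoRecordingData table; rows flagged with
+ VideoDataModified = "Y" are pending a metadata push to YouTube."""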
+ def __init__(self):
+ self.connection = sqlite3.connect(Settings.GetValue("DatabasePath"))
+
+ def InsertVideos(self, insertionData):
+ if len(insertionData) > 0:
+ cursor = self.connection.cursor()
+ sortedData = sorted(insertionData, key = lambda current : current[1])
+ for (insertId, insertDateTime) in sortedData:
+ cursor.execute('''INSERT OR IGNORE INTO VideoRecordingData (VideoYoutubeId, VideoRecordingDate, VideoDataModified) VALUES (?,?,?)''', (str(insertId),str(insertDateTime),"Y"))
+ self.connection.commit()
+
+ def GetVideosToOrganise(self):
+ cursor = self.connection.cursor()
+ cursor.execute('''SELECT VideoYoutubeId, VideoRecordingDate FROM VideoRecordingData WHERE VideoShastraName is null OR VideoShastraName = '' ''')
+ returnData = [(fetch_data[0], fetch_data[1]) for fetch_data in cursor]
+ sortedData = sorted(returnData, key = lambda current : current[1])
+ return sortedData
+
+ def UpdateShastraName(self, videoId, videoShastraName):
+ cursor = self.connection.cursor()
+ cursor.execute('''UPDATE VideoRecordingData SET VideoShastraName = ?, VideoDataModified = ? WHERE VideoYoutubeId = ?''', (str(videoShastraName), "Y", str(videoId)))
+ self.connection.commit()
+
+ def UpdateInternalReferences(self):
+ cursor = self.connection.cursor()
+ cursor.execute('''SELECT VideoId FROM VideoRecordingData WHERE VideoDataModified = ? ''', ("Y",))
+ videoIdToProcess = [fetch_data[0] for fetch_data in cursor]
+ for currentVideoId in videoIdToProcess:
+ cursor.execute('''SELECT VideoShastraName, VideoYoutubeId FROM VideoRecordingData WHERE VideoId = ?''', (str(currentVideoId),))
+ fetchRecord = cursor.fetchone()
+ VideoShastraName, VideoYoutubeId = str(fetchRecord[0]), fetchRecord[1]
+ # VideoPrevId: link this video back to the nearest earlier video of the
+ # same shastra, and link that video forward to this one.
+ cursor.execute('''SELECT VideoYoutubeId, VideoId FROM VideoRecordingData WHERE VideoId < ? AND VideoShastraName = ? ORDER BY VideoId DESC''', (currentVideoId, VideoShastraName))
+ fetchRecord = cursor.fetchone()
+ if fetchRecord:
+ PreviousVideoYoutubeId, PreviousVideoId = fetchRecord[0], fetchRecord[1]
+ cursor.execute('''UPDATE VideoRecordingData SET VideoPrevId = ?, VideoDataModified = ? WHERE VideoId = ?''', (str(PreviousVideoYoutubeId), "Y", str(currentVideoId)))
+ cursor.execute('''UPDATE VideoRecordingData SET VideoNextId = ?, VideoDataModified = ? WHERE VideoId = ?''', (str(VideoYoutubeId), "Y", str(PreviousVideoId)))
+ # VideoNextId: link this video forward to the nearest later video of the
+ # same shastra, and link that video back to this one.
+ cursor.execute('''SELECT VideoYoutubeId, VideoId FROM VideoRecordingData WHERE VideoId > ? AND VideoShastraName = ? ORDER BY VideoId''', (currentVideoId, VideoShastraName))
+ fetchRecord = cursor.fetchone()
+ if fetchRecord:
+ NextVideoYoutubeId, NextVideoId = fetchRecord[0], fetchRecord[1]
+ cursor.execute('''UPDATE VideoRecordingData SET VideoNextId = ?, VideoDataModified = ? WHERE VideoId = ?''', (str(NextVideoYoutubeId), "Y", str(currentVideoId)))
+ cursor.execute('''UPDATE VideoRecordingData SET VideoPrevId = ?, VideoDataModified = ? WHERE VideoId = ?''', (str(VideoYoutubeId), "Y", str(NextVideoId)))
+ self.connection.commit()
+
+ def FetchDataToUpdate(self):
+ cursor = self.connection.cursor()
+ cursor.execute('''SELECT VideoId, VideoYoutubeId, VideoRecordingDate, VideoShastraName, VideoPrevId, VideoNextId FROM VideoRecordingData WHERE VideoDataModified = ?''', ("Y",))
+ for fetch_data in cursor:
+ self.updateVideoId = fetch_data[0]
+ yield (fetch_data[0], fetch_data[1], fetch_data[2], fetch_data[3], fetch_data[4], fetch_data[5])
+
+ def SetUpdateCompleted(self, updateVideoId = None):
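+ # Defaults to the row most recently yielded by FetchDataToUpdate().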
+ updateVideoId = self.updateVideoId if updateVideoId is None else updateVideoId
+ cursor = self.connection.cursor()
+ cursor.execute('''UPDATE VideoRecordingData SET VideoDataModified = ? WHERE VideoId = ?''', ("", updateVideoId))
+ self.connection.commit()
diff --git a/CustomisedFileOperations.py b/CustomisedFileOperations.py
new file mode 100644
index 0000000..774c1f2
--- /dev/null
+++ b/CustomisedFileOperations.py
@@ -0,0 +1,22 @@
+from os import path, sep, makedirs
+from shutil import copy2 as copy
+
+from Functions import Functions
+
+class CustomisedFileOperations():
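+ """Base class for backup operations: pairs each scanned file with its
+ organised destination path and copies whatever a backup tree lacks."""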
+ def ModifyFilesDataToOrganise(self, allFiles):
+ self.fileData = [(file, Functions.GenerateFileOrganisePath(file)) for file in allFiles]
+
+ def OrganiseFiles(self, sourcePath, destinationPath):
+ organiseFiles = [(sourceFile, destFile) for (sourceFile, destFile) in self.fileData if not path.exists(path.join(destinationPath, destFile))]
+ for (sourceFile, destFilePath) in organiseFiles:
+ completeSourceFilePath = path.join(sourcePath, sourceFile)
+ completeDestinationFilePath = path.join(destinationPath, destFilePath)
+ destFolderPath = path.dirname(completeDestinationFilePath)
+ if not path.exists(destFolderPath):
+ makedirs(destFolderPath)
+ copy(completeSourceFilePath, completeDestinationFilePath)
\ No newline at end of file
diff --git a/CustomisedWebDriver.py b/CustomisedWebDriver.py
new file mode 100644
index 0000000..0ee70e4
--- /dev/null
+++ b/CustomisedWebDriver.py
@@ -0,0 +1,27 @@
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.common.keys import Keys
+from selenium.webdriver.support import expected_conditions
+from selenium.webdriver.support.ui import WebDriverWait
+
+class CustomisedWebDriver(webdriver.Chrome):
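+ """webdriver.Chrome subclass that remembers the last located element, so
+ the locate / click / send-keys steps of the OAuth flow chain naturally."""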
+ def SendKeys(self, inputData):
+ self.currentElement.clear()
+ self.currentElement.send_keys(inputData)
+ self.currentElement.send_keys(Keys.RETURN)
+
+ def Click(self):
+ self.currentElement.click()
+
+ def LocateByPath(self, locatorString):
+ webWait = WebDriverWait(self, 60)
+ element = webWait.until(expected_conditions.element_to_be_clickable((By.XPATH, locatorString)))
+ self.currentElement = element
+
+ def LaunchURL(self, urlToLaunch):
+ #self.set_window_position(-10000,0)
+ self.implicitly_wait(20)
+ self.get(urlToLaunch)
+
+ def GetAuthCode(self):
+ return self.find_element_by_tag_name("textarea").text
\ No newline at end of file
diff --git a/CustomisedYoutubeService.py b/CustomisedYoutubeService.py
new file mode 100644
index 0000000..c163c5e
--- /dev/null
+++ b/CustomisedYoutubeService.py
@@ -0,0 +1,139 @@
+#C:\Python27\Lib\site-packages\google_auth_oauthlib - Changes in flow.py
+from googleapiclient.discovery import build
+from google_auth_oauthlib.flow import InstalledAppFlow
+
+import datetime
+import json
+
+from CustomisedWebDriver import CustomisedWebDriver
+from Settings import Settings
+
+class CustomisedYoutubeService():
+ SCOPES = ['https://www.googleapis.com/auth/youtube']
+ API_SERVICE_NAME = 'youtube'
+ API_VERSION = 'v3'
+ YOUTUBEVIDEOPREFIX = "https://www.youtube.com/watch?v="
+
+ def __init__(self):
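+ # Two-step console flow against the locally modified flow.py: run_console()
+ # returns the auth URL, Selenium harvests the consent code, and
+ # run_console_rest() exchanges it for credentials.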
+ client_config = json.loads(Settings.GetValue("YoutubeSecret"))
+ appFlow = InstalledAppFlow.from_client_config(client_config, CustomisedYoutubeService.SCOPES)
+ urlForAuth = appFlow.run_console()
+ authCode = CustomisedYoutubeService.GetAuthenticationCode(urlForAuth)
+ credentialsForAuth = appFlow.run_console_rest(authCode)
+ self.youtubeService = build(CustomisedYoutubeService.API_SERVICE_NAME, CustomisedYoutubeService.API_VERSION, credentials = credentialsForAuth)
+
+ @staticmethod
+ def GetAuthenticationCode(authUrl):
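+ # Drive the Google consent screen with Selenium: sign in, pick the channel,
+ # click through the unverified-app warning, and read the OOB code from the
+ # page's textarea.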
+ webDriver = CustomisedWebDriver()
+ webDriver.LaunchURL(authUrl)
+ webDriver.LocateByPath("//input[@type='email']")
+ webDriver.SendKeys(Settings.GetValue("GUsername"))
+ webDriver.LocateByPath("//input[@type='password']")
+ webDriver.SendKeys(Settings.GetValue("GPassword"))
+ webDriver.LocateByPath("//*[contains(text(), '" + Settings.GetValue("YTChannelName") + "')]")
+ webDriver.Click()
+ webDriver.LocateByPath("//a[contains(text(), 'Advanced')]")
+ webDriver.Click()
+ webDriver.LocateByPath("//a[contains(text(), 'Go to python test project (unsafe)')]")
+ webDriver.Click()
+ webDriver.LocateByPath("//span[contains(text(), 'Allow')]")
+ webDriver.Click()
+ code = webDriver.GetAuthCode()
+ webDriver.quit()
+ return code
+
+ @staticmethod
+ def ConvertToISTTime(dateTimeString):
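+ # Convert a UTC RFC 3339 timestamp to IST (UTC+05:30) as "YYYYMMDD HHMMSS".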
+ parsedDateTime = datetime.datetime.strptime(dateTimeString, "%Y-%m-%dT%H:%M:%S.%fZ")
+ parsedDateTime = parsedDateTime + datetime.timedelta(hours=5,minutes=30)
+ return parsedDateTime.strftime("%Y%m%d %H%M%S")
+
+ def UpdateVideoInformation(self, videoId = None, videoTitle = None, videoDescription = None, videoRecordingDate = None):
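+ # Any argument left as None falls back to the values cached by
+ # SetVideoDetails(); returns False when the video id is not found.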
+ videoId = self.videoYoutubeId if videoId is None else videoId
+ videoTitle = self.videoTitle if videoTitle is None else videoTitle
+ videoDescription = self.videoDescription if videoDescription is None else videoDescription
+ videoRecordingDate = self.videoRecordingDate if videoRecordingDate is None else videoRecordingDate
+ queryReturnParts = "snippet,recordingDetails"
+ videoToUpdate = self.youtubeService.videos().list(
+ id = videoId,
+ part = queryReturnParts
+ ).execute()
+ if not videoToUpdate[u"items"]:
+ return False
+ videoSnippet = videoToUpdate[u"items"][0][u"snippet"]
+ videoRecordingDetails = dict()
+ if videoTitle:
+ videoSnippet[u"title"] = videoTitle
+ if videoDescription:
+ videoSnippet[u"description"] = videoDescription
+ if videoRecordingDate:
+ videoRecordingDetails[u"recordingDate"] = videoRecordingDate
+ if u"tags" not in videoSnippet:
+ videoSnippet[u"tags"] = []
+ videos_update_response = self.youtubeService.videos().update(
+ part = queryReturnParts,
+ body = dict(
+ snippet = videoSnippet,
+ recordingDetails = videoRecordingDetails,
+ id = videoId)
+ ).execute()
+ print(videoId)
+ print(videoTitle)
+ print(videoDescription)
+ print("----------------")
+ return True
+
+ def GetVideoIDs(self, searchString):
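+ # Page through the channel's search results for searchString and collect
+ # every matching video id.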
+ queryReturnParts = "id,snippet"
+ orderString = "date"
+ queryString = searchString
+ nextPageToken = ""
+ responseData = []
+ while True:
+ response = self.youtubeService.search().list(
+ part = queryReturnParts,
+ channelId = Settings.GetValue("YTChannelID"),
+ order = orderString,
+ q = queryString,
+ pageToken = nextPageToken
+ ).execute()
+ for currentResponseItems in response["items"]:
+ if u"videoId" in currentResponseItems[u"id"].keys():
+ currentVideoId = currentResponseItems[u"id"][u"videoId"]
+ responseData.append(currentVideoId)
+ if u"nextPageToken" in response.keys():
+ nextPageToken = response[u"nextPageToken"]
+ else:
+ break
+ return responseData
+
+ def GetVideoStartTimeDetails(self, inputList):
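+ # Return (videoId, IST start time) pairs taken from each video's
+ # liveStreamingDetails.actualStartTime.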
+ queryReturnParts = "id,liveStreamingDetails"
+ idToFetch = ",".join(inputList)
+ responseData = []
+ nextPageToken = ""
+ while True:
+ response = self.youtubeService.videos().list(
+ part = queryReturnParts,
+ id = idToFetch,
+ pageToken = nextPageToken
+ ).execute()
+ for currentResponseItems in response["items"]:
+ responseData.append((str(currentResponseItems[u"id"]), (CustomisedYoutubeService.ConvertToISTTime(currentResponseItems[u"liveStreamingDetails"][u"actualStartTime"]))))
+ if u"nextPageToken" in response.keys():
+ nextPageToken = response[u"nextPageToken"]
+ else:
+ break
+ return responseData
+
+ def SetVideoDetails(self, dataTuple, videoType):
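+ # dataTuple mirrors a FetchDataToUpdate() row: (VideoId, VideoYoutubeId,
+ # VideoRecordingDate, VideoShastraName, VideoPrevId, VideoNextId).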
+ VideoId = videoType + str(dataTuple[0])
+ VideoRecordingDate = datetime.datetime.strptime(dataTuple[2], "%Y%m%d %H%M%S").strftime("%d %B %Y")
+ VideoShastraNameTitle = dataTuple[3] if dataTuple[3] is not None else "Vanchan"
+ VideoShastraNameDesc = dataTuple[3] if dataTuple[3] is not None else "TBD"
+ VideoPrevId = "TBD" if not dataTuple[4] else str(CustomisedYoutubeService.YOUTUBEVIDEOSUFFIX + dataTuple[4])
+ VideoNextId = "TBD" if not dataTuple[5] else str(CustomisedYoutubeService.YOUTUBEVIDEOSUFFIX + dataTuple[5])
+ self.videoYoutubeId = dataTuple[1]
+ self.videoTitle = VideoShastraNameTitle + " || " + VideoRecordingDate + " || " + VideoId + " || Live Stream"
+ self.videoDescription = "Shree Adinath Digambar Jain Mandir, Rajkot\nLive Stream\nPrevious Video Link: " + VideoPrevId + "\nShastra: " + VideoShastraNameDesc + "\nRecording Date: " + VideoRecordingDate + "\nRecording Number: " + VideoId + "\nNext Video Link: " + VideoNextId
+ self.videoRecordingDate = datetime.datetime.strptime(dataTuple[2], "%Y%m%d %H%M%S").strftime("%Y-%m-%dT%H:%M:%S.%fZ")
\ No newline at end of file
diff --git a/Execute.bat b/Execute.bat
new file mode 100644
index 0000000..33ae301
--- /dev/null
+++ b/Execute.bat
@@ -0,0 +1,3 @@
+@echo off
+python Program.py
+timeout 15
\ No newline at end of file
diff --git a/Functions.py b/Functions.py
new file mode 100644
index 0000000..1b78463
--- /dev/null
+++ b/Functions.py
@@ -0,0 +1,71 @@
+from os import walk, path, sep, remove, rmdir
+import datetime
+
+class Functions():
+ @staticmethod
+ def GetFolderPath(directoryPath, specializeString):
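+ # Walk directoryPath and return the last folder whose name starts with
+ # specializeString, or None when nothing matches.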
+ specializeFolderPath = None
+ for (dirpath, dirnames, filenames) in walk(directoryPath):
+ for dir in dirnames:
+ if dir.startswith(specializeString):
+ specializeFolderPath = path.join(dirpath, dir)
+ return specializeFolderPath
+
+ @staticmethod
+ def DeleteEmptyFolders(destinationPath):
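+ # Drop folders left empty (or holding only desktop.ini) after staging
+ # files were moved out.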
+ for (dirpath, dirnames, filenames) in walk(destinationPath):
+ if dirpath == destinationPath:
+ continue
+ if len(filenames) == 1 and filenames[0] == "desktop.ini":
+ print("DeleteEmptyFolders1")
+ try:
+ remove(path.join(dirpath, filenames[0]))
+ rmdir(dirpath)
+ except OSError:
+ continue
+ if len(filenames) == 0:
+ print("DeleteEmptyFolders0")
+ try:
+ rmdir(dirpath)
+ except OSError:
+ continue
+
+ @staticmethod
+ def ScanAllFiles(directoryPath, includeFolder = True):
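+ # Return file (and optionally folder) paths relative to directoryPath,
+ # skipping desktop.ini entries.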
+ files = []
+ for (dirpath, dirnames, filenames) in walk(directoryPath):
+ directory_path_relative = path.relpath(dirpath, directoryPath)
+ if includeFolder:
+ files.extend(path.join(directory_path_relative, dir) for dir in dirnames)
+ files.extend(path.join(directory_path_relative, file) for file in filenames if file != "desktop.ini")
+ return files
+
+ @staticmethod
+ def GenerateFileOrganisePath(fileRelativePath):
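+ # File names carry the recording date at characters 4-12 (YYYYMMDD); build
+ # a "<year range>/<month>/<shastra name>/<file>" destination from it.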
+ monthFolder = { 1 : '01 JAN', 2 : '02 FEB', 3 : '03 MAR', 4 : '04 APR', 5 : '05 MAY', 6 : '06 JUN', 7 : '07 JUL', 8 : '08 AUG', 9 : '09 SEP', 10 : '10 OCT', 11 : '11 NOV', 12 : '12 DEC' }
+ fileData = fileRelativePath.split(sep)
+ folderShastraName, fileName = fileData[0], fileData[1]
+ fileRecDate = datetime.datetime.strptime(fileName[4:12],'%Y%m%d').date()
+ folderYear = str(fileRecDate.year) + '-01 To ' + str(fileRecDate.year) + '-12'
+ folderMon = monthFolder.get(fileRecDate.month, '00 MON') + '-' + str(fileRecDate.year)
+ return path.join(folderYear, folderMon, folderShastraName, fileName)
+
+ @staticmethod
+ def GetShastraName(videoStartDate, filesData):
+ # The shastra name is the top-level folder of the one local .mp3 whose
+ # embedded timestamp falls within five minutes of the stream start.
+ # (Comparing raw HHMMSS integers misfires across minute/hour boundaries,
+ # so compare proper datetimes instead.)
+ video_start = datetime.datetime.strptime(videoStartDate, '%Y%m%d %H%M%S')
+ probable_source_file = []
+ for source_file in filesData:
+ if '.mp3' not in source_file:
+ continue
+ source_file_name = source_file.split(sep)[-1]
+ source_file_start = datetime.datetime.strptime(source_file_name[4:12] + source_file_name[13:19], '%Y%m%d%H%M%S')
+ if abs((source_file_start - video_start).total_seconds()) < (5 * 60):
+ probable_source_file.append(source_file)
+ if len(probable_source_file) == 1:
+ source_file_path = probable_source_file[0]
+ if source_file_path.find(sep) != -1:
+ return source_file_path[:source_file_path.index(sep)]
+ return None
\ No newline at end of file
diff --git a/Program.py b/Program.py
new file mode 100644
index 0000000..4e5d290
--- /dev/null
+++ b/Program.py
@@ -0,0 +1,55 @@
+#pip install google-api-python-client
+#pip install google_auth_oauthlib
+
+from CustomisedYoutubeService import CustomisedYoutubeService
+from CustomisedDatabase import CustomisedDatabase
+from CustomisedAudioRecording import CustomisedAudioRecording
+from Settings import Settings
+from Functions import Functions
+
+try:
+ Settings.RegisterPath()
+ Settings.RefreshData()
+ print("Organising Local Data...")
+ print("Organising Audio Recordings...")
+ print("Organising Folder Alpha...")
+ alphaData = CustomisedAudioRecording("Alpha")
+ alphaData.PerformOperations()
+ alphaSourceFiles = alphaData.GetFiles()
+ print("Organising Folder Beta...")
+ betaData = CustomisedAudioRecording("Beta")
+ betaData.PerformOperations()
+ betaSourceFiles = betaData.GetFiles()
+ print("Organising Folder Gamma...")
+ gammaData = CustomisedAudioRecording("Gamma")
+ gammaData.PerformOperations()
+ gammaSourceFiles = gammaData.GetFiles()
+ print("Connecting to different services...")
+ database = CustomisedDatabase()
+ youtube = CustomisedYoutubeService()
+ print("Fetching new Videos...")
+ videoList = youtube.GetVideoIDs("Streaming")
+ videoDetails = youtube.GetVideoStartTimeDetails(videoList)
+ print("Adding new Videos...")
+ database.InsertVideos(videoDetails)
+ print("Processing Videos...")
+ videoDataToProcess = database.GetVideosToOrganise()
+ print("Updating Shastra Name for Videos...")
+ for (videoId, videoStartDate) in videoDataToProcess:
+ shastraName = Functions.GetShastraName(videoStartDate, alphaSourceFiles)
+ if shastraName:
+ database.UpdateShastraName(videoId, shastraName)
+ print("Updating Links for Videos...")
+ database.UpdateInternalReferences()
+ print("Updating Video Data on Youtube...")
+ for updateData in database.FetchDataToUpdate():
+ youtube.SetVideoDetails(updateData, "L")
+ isUpdateSuccess = youtube.UpdateVideoInformation()
+ if isUpdateSuccess:
+ database.SetUpdateCompleted()
+except Exception as exp:
+ print('An error occurred:')
+ print(exp)
\ No newline at end of file
diff --git a/Settings.py b/Settings.py
new file mode 100644
index 0000000..807ec2e
--- /dev/null
+++ b/Settings.py
@@ -0,0 +1,25 @@
+import sqlite3
+import os
+
+class Settings():
+ Data = dict()
+ DBPATH = "Youtube_Recoding_Data"
+
+ @classmethod
+ def RegisterPath(cls):
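+ # Expose the bundled webdrivers folder on PATH so Selenium can find the
+ # Chrome driver binary.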
+ pathToRegister = os.path.join(os.path.dirname(__file__), "webdrivers")
+ os.environ["PATH"] += os.pathsep + pathToRegister
+
+ @classmethod
+ def GetValue(cls, key):
+ return cls.Data.get(key)
+
+ @classmethod
+ def RefreshData(cls):
+ connection = sqlite3.connect(cls.DBPATH)
+ cursor = connection.cursor()
+ cursor.execute('''SELECT SettingsKey, SettingsValue FROM SettingsData''')
+ cls.Data = dict()
+ cls.Data["DatabasePath"] = cls.DBPATH
+ for fetch_data in cursor:
+ cls.Data[fetch_data[0]] = str(fetch_data[1])
\ No newline at end of file
diff --git a/SpecializeFileOperations.py b/SpecializeFileOperations.py
new file mode 100644
index 0000000..9f2cc8e
--- /dev/null
+++ b/SpecializeFileOperations.py
@@ -0,0 +1,65 @@
+import datetime
+from os import walk, path, sep, makedirs, rename
+from shutil import copy2 as copy
+
+from CustomisedFileOperations import CustomisedFileOperations
+from Functions import Functions
+
+class SpecializeFileOperations(CustomisedFileOperations):
+ def GetFiles(self, allFiles):
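+ # Keep only files recorded within the last nine days; these make up the
+ # "Current Recordings" staging set.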
+ startDate = datetime.datetime.now().date()-datetime.timedelta(days=9)
+ specializeFiles = [file for file in allFiles if datetime.datetime.strptime(file.split(sep)[-1][4:12], '%Y%m%d').date() > startDate]
+ self.specializeFiles = sorted(specializeFiles, key = lambda current : current.split(sep)[-1][4:12])
+ self.GetDates()
+
+ def GetDates(self):
+ self.specializeMinDate = None
+ self.specializeMaxDate = None
+ if len(self.specializeFiles) > 0:
+ self.specializeMinDate = datetime.datetime.strptime(self.specializeFiles[0].split(sep)[-1][4:12],'%Y%m%d').strftime('%Y-%m-%d')
+ self.specializeMaxDate = datetime.datetime.strptime(self.specializeFiles[-1].split(sep)[-1][4:12],'%Y%m%d').strftime('%Y-%m-%d')
+
+ def AddData(self, sourcePath, destinationPath):
+ for file in self.specializeFiles:
+ completeSourceFilePath = path.join(sourcePath, file)
+ completeDestinationFilePath = path.join(destinationPath, file)
+ if not path.exists(completeDestinationFilePath):
+ folderPath = path.dirname(completeDestinationFilePath)
+ if not path.exists(folderPath):
+ makedirs(folderPath)
+ copy(completeSourceFilePath, completeDestinationFilePath)
+
+ def MoveData(self, destinationPath, movePath):
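+ # Files that fell out of the nine-day window are moved from the staging
+ # folder into movePath rather than deleted.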
+ currentFileContents = Functions.ScanAllFiles(destinationPath, False)
+ dataToRemove = [file for file in currentFileContents if file not in self.specializeFiles]
+ if len(dataToRemove) > 0:
+ for file in dataToRemove:
+ completeDestinationFilePath = path.join(destinationPath, file)
+ completeMoveFilePath = path.join(movePath, file)
+ folderPath = path.dirname(completeMoveFilePath)
+ if not path.exists(folderPath):
+ makedirs(folderPath)
+ rename(completeDestinationFilePath, completeMoveFilePath)
+
+ def UpdateFolderName(self, destinationPath, specializeString):
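+ # Rename the staging folder to advertise its date range, e.g.
+ # "Current Recordings [2019-01-01 To 2019-01-09]".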
+ destinationFolderPath = destinationPath.split(sep)
+ destinationFolderPath.pop()
+ if self.specializeMinDate and self.specializeMaxDate:
+ destinationFolderPath.append(specializeString + " [" + self.specializeMinDate + " To " + self.specializeMaxDate + "]")
+ else:
+ destinationFolderPath.append(specializeString)
+ specializationPath = sep.join(destinationFolderPath)
+ rename(destinationPath, specializationPath)
+
+ def PerformSpecializeOperations(self, allFiles, sourcePath, specializeFolder, movePath):
+ self.GetFiles(allFiles)
+ self.AddData(sourcePath, specializeFolder)
+ self.MoveData(specializeFolder, movePath)
+ Functions.DeleteEmptyFolders(specializeFolder)
+ self.UpdateFolderName(specializeFolder, "Current Recordings")
\ No newline at end of file
diff --git a/google_auth_oauthlib/__init__.py b/google_auth_oauthlib/__init__.py
new file mode 100644
index 0000000..1905f9a
--- /dev/null
+++ b/google_auth_oauthlib/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""oauthlib integration for Google Auth
+
+This library provides `oauthlib <https://oauthlib.readthedocs.io/en/latest/>`__
+integration with `google-auth <https://google-auth.readthedocs.io/en/latest/>`__.
+"""
+
+from .interactive import get_user_credentials
+
+__all__ = ["get_user_credentials"]
diff --git a/google_auth_oauthlib/flow.py b/google_auth_oauthlib/flow.py
new file mode 100644
index 0000000..a53bed6
--- /dev/null
+++ b/google_auth_oauthlib/flow.py
@@ -0,0 +1,484 @@
+# Copyright 2016 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Authorization Flow
+
+This module provides integration with `requests-oauthlib`_ for running the
+`OAuth 2.0 Authorization Flow`_ and acquiring user credentials.
+
+Here's an example of using :class:`Flow` with the installed application
+authorization flow::
+
+ from google_auth_oauthlib.flow import Flow
+
+ # Create the flow using the client secrets file from the Google API
+ # Console.
+ flow = Flow.from_client_secrets_file(
+ 'path/to/client_secrets.json',
+ scopes=['profile', 'email'],
+ redirect_uri='urn:ietf:wg:oauth:2.0:oob')
+
+ # Tell the user to go to the authorization URL.
+ auth_url, _ = flow.authorization_url(prompt='consent')
+
+ print('Please go to this URL: {}'.format(auth_url))
+
+ # The user will get an authorization code. This code is used to get the
+ # access token.
+ code = input('Enter the authorization code: ')
+ flow.fetch_token(code=code)
+
+ # You can use flow.credentials, or you can just get a requests session
+ # using flow.authorized_session.
+ session = flow.authorized_session()
+ print(session.get('https://www.googleapis.com/userinfo/v2/me').json())
+
+This particular flow can be handled entirely by using
+:class:`InstalledAppFlow`.
+
+.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
+.. _OAuth 2.0 Authorization Flow:
+ https://tools.ietf.org/html/rfc6749#section-1.2
+"""
+from base64 import urlsafe_b64encode
+import hashlib
+import json
+import logging
+try:
+ from secrets import SystemRandom
+except ImportError: # pragma: NO COVER
+ from random import SystemRandom
+from string import ascii_letters, digits
+import webbrowser
+import wsgiref.simple_server
+import wsgiref.util
+
+import google.auth.transport.requests
+import google.oauth2.credentials
+from six.moves import input
+
+import google_auth_oauthlib.helpers
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Flow(object):
+ """OAuth 2.0 Authorization Flow
+
+ This class uses a :class:`requests_oauthlib.OAuth2Session` instance at
+ :attr:`oauth2session` to perform all of the OAuth 2.0 logic. This class
+ just provides convenience methods and sane defaults for doing Google's
+ particular flavors of OAuth 2.0.
+
+ Typically you'll construct an instance of this flow using
+ :meth:`from_client_secrets_file` and a `client secrets file`_ obtained
+ from the `Google API Console`_.
+
+ .. _client secrets file:
+ https://developers.google.com/identity/protocols/OAuth2WebServer
+ #creatingcred
+ .. _Google API Console:
+ https://console.developers.google.com/apis/credentials
+ """
+
+ def __init__(
+ self, oauth2session, client_type, client_config,
+ redirect_uri=None, code_verifier=None):
+ """
+ Args:
+ oauth2session (requests_oauthlib.OAuth2Session):
+ The OAuth 2.0 session from ``requests-oauthlib``.
+ client_type (str): The client type, either ``web`` or
+ ``installed``.
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ redirect_uri (str): The OAuth 2.0 redirect URI if known at flow
+ creation time. Otherwise, it will need to be set using
+ :attr:`redirect_uri`.
+ code_verifier (str): random string of 43-128 chars used to verify
+ the key exchange using PKCE. Auto-generated if not provided.
+
+ .. _client secrets:
+ https://developers.google.com/api-client-library/python/guide
+ /aaa_client_secrets
+ """
+ self.client_type = client_type
+ """str: The client type, either ``'web'`` or ``'installed'``"""
+ self.client_config = client_config[client_type]
+ """Mapping[str, Any]: The OAuth 2.0 client configuration."""
+ self.oauth2session = oauth2session
+ """requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
+ self.redirect_uri = redirect_uri
+ self.code_verifier = code_verifier
+
+ @classmethod
+ def from_client_config(cls, client_config, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` from client
+ configuration loaded from a Google-format client secrets file.
+
+ Args:
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Flow: The constructed Flow instance.
+
+ Raises:
+ ValueError: If the client configuration is not in the correct
+ format.
+
+ .. _client secrets:
+ https://developers.google.com/api-client-library/python/guide
+ /aaa_client_secrets
+ """
+ if 'web' in client_config:
+ client_type = 'web'
+ elif 'installed' in client_config:
+ client_type = 'installed'
+ else:
+ raise ValueError(
+ 'Client secrets must be for a web or installed app.')
+
+ session, client_config = (
+ google_auth_oauthlib.helpers.session_from_client_config(
+ client_config, scopes, **kwargs))
+
+ redirect_uri = kwargs.get('redirect_uri', None)
+ return cls(session, client_type, client_config, redirect_uri)
+
+ @classmethod
+ def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
+ """Creates a :class:`Flow` instance from a Google client secrets file.
+
+ Args:
+ client_secrets_file (str): The path to the client secrets .json
+ file.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Flow: The constructed Flow instance.
+ """
+ with open(client_secrets_file, 'r') as json_file:
+ client_config = json.load(json_file)
+
+ return cls.from_client_config(client_config, scopes=scopes, **kwargs)
+
+ @property
+ def redirect_uri(self):
+ """The OAuth 2.0 redirect URI. Pass-through to
+ ``self.oauth2session.redirect_uri``."""
+ return self.oauth2session.redirect_uri
+
+ @redirect_uri.setter
+ def redirect_uri(self, value):
+ self.oauth2session.redirect_uri = value
+
+ def authorization_url(self, **kwargs):
+ """Generates an authorization URL.
+
+ This is the first step in the OAuth 2.0 Authorization Flow. The user's
+ browser should be redirected to the returned URL.
+
+ This method calls
+ :meth:`requests_oauthlib.OAuth2Session.authorization_url`
+ and specifies the client configuration's authorization URI (usually
+ Google's authorization server) and specifies that "offline" access is
+ desired. This is required in order to obtain a refresh token.
+
+ Args:
+ kwargs: Additional arguments passed through to
+ :meth:`requests_oauthlib.OAuth2Session.authorization_url`
+
+ Returns:
+ Tuple[str, str]: The generated authorization URL and state. The
+ user must visit the URL to complete the flow. The state is used
+ when completing the flow to verify that the request originated
+ from your application. If your application is using a different
+ :class:`Flow` instance to obtain the token, you will need to
+ specify the ``state`` when constructing the :class:`Flow`.
+ """
+ kwargs.setdefault('access_type', 'offline')
+ if not self.code_verifier:
+ chars = ascii_letters+digits+'-._~'
+ rnd = SystemRandom()
+ random_verifier = [rnd.choice(chars) for _ in range(0, 128)]
+ self.code_verifier = ''.join(random_verifier)
+ code_hash = hashlib.sha256()
+ code_hash.update(str.encode(self.code_verifier))
+ unencoded_challenge = code_hash.digest()
+ b64_challenge = urlsafe_b64encode(unencoded_challenge)
+ code_challenge = b64_challenge.decode().split('=')[0]
+ kwargs.setdefault('code_challenge', code_challenge)
+ kwargs.setdefault('code_challenge_method', 'S256')
+ url, state = self.oauth2session.authorization_url(
+ self.client_config['auth_uri'], **kwargs)
+
+ return url, state
+
+ def fetch_token(self, **kwargs):
+ """Completes the Authorization Flow and obtains an access token.
+
+ This is the final step in the OAuth 2.0 Authorization Flow. This is
+ called after the user consents.
+
+ This method calls
+ :meth:`requests_oauthlib.OAuth2Session.fetch_token`
+ and specifies the client configuration's token URI (usually Google's
+ token server).
+
+ Args:
+ kwargs: Arguments passed through to
+ :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
+ one of ``code`` or ``authorization_response`` must be
+ specified.
+
+ Returns:
+ Mapping[str, str]: The obtained tokens. Typically, you will not use the
+ return value of this function and will instead use
+ :meth:`credentials` to obtain a
+ :class:`~google.auth.credentials.Credentials` instance.
+ """
+ kwargs.setdefault('client_secret', self.client_config['client_secret'])
+ kwargs.setdefault('code_verifier', self.code_verifier)
+ return self.oauth2session.fetch_token(
+ self.client_config['token_uri'], **kwargs)
+
+ @property
+ def credentials(self):
+ """Returns credentials from the OAuth 2.0 session.
+
+ :meth:`fetch_token` must be called before accessing this. This method
+ constructs a :class:`google.oauth2.credentials.Credentials` class using
+ the session's token and the client config.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: If there is no access token in the session.
+ """
+ return google_auth_oauthlib.helpers.credentials_from_session(
+ self.oauth2session, self.client_config)
+
+ def authorized_session(self):
+ """Returns a :class:`requests.Session` authorized with credentials.
+
+ :meth:`fetch_token` must be called before this method. This method
+ constructs a :class:`google.auth.transport.requests.AuthorizedSession`
+ class using this flow's :attr:`credentials`.
+
+ Returns:
+ google.auth.transport.requests.AuthorizedSession: The constructed
+ session.
+ """
+ return google.auth.transport.requests.AuthorizedSession(
+ self.credentials)
+
+
+class InstalledAppFlow(Flow):
+ """Authorization flow helper for installed applications.
+
+ This :class:`Flow` subclass makes it easier to perform the
+ `Installed Application Authorization Flow`_. This flow is useful for
+ local development or applications that are installed on a desktop operating
+ system.
+
+ This flow has two strategies: The console strategy provided by
+ :meth:`run_console` and the local server strategy provided by
+ :meth:`run_local_server`.
+
+ Example::
+
+ from google_auth_oauthlib.flow import InstalledAppFlow
+
+ flow = InstalledAppFlow.from_client_secrets_file(
+ 'client_secrets.json',
+ scopes=['profile', 'email'])
+
+ flow.run_local_server()
+
+ session = flow.authorized_session()
+
+ profile_info = session.get(
+ 'https://www.googleapis.com/userinfo/v2/me').json()
+
+ print(profile_info)
+ # {'name': '...', 'email': '...', ...}
+
+
+ Note that these aren't the only two ways to accomplish the installed
+ application flow, they are just the most common ways. You can use the
+ :class:`Flow` class to perform the same flow with different methods of
+ presenting the authorization URL to the user or obtaining the authorization
+ response, such as using an embedded web view.
+
+ .. _Installed Application Authorization Flow:
+ https://developers.google.com/api-client-library/python/auth
+ /installed-app
+ """
+ _OOB_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
+
+ _DEFAULT_AUTH_PROMPT_MESSAGE = (
+ 'Please visit this URL to authorize this application: {url}')
+ """str: The message to display when prompting the user for
+ authorization."""
+ _DEFAULT_AUTH_CODE_MESSAGE = (
+ 'Enter the authorization code: ')
+ """str: The message to display when prompting the user for the
+ authorization code. Used only by the console strategy."""
+
+ _DEFAULT_WEB_SUCCESS_MESSAGE = (
+ 'The authentication flow has completed, you may close this window.')
+
+ def run_console(
+ self,
+ authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
+ authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE,
+ **kwargs):
+ """Run the flow using the console strategy.
+
+ The console strategy instructs the user to open the authorization URL
+ in their browser. Once the authorization is complete the authorization
+ server will give the user a code. The user then must copy & paste this
+ code into the application. The code is then exchanged for a token.
+
+ Args:
+ authorization_prompt_message (str): The message to display to tell
+ the user to navigate to the authorization URL.
+ authorization_code_message (str): The message to display when
+ prompting the user for the authorization code.
+ kwargs: Additional keyword arguments passed through to
+ :meth:`authorization_url`.
+
+ Returns:
+ str: The authorization URL to show the user; with this local
+ modification, the code exchange happens in :meth:`run_console_rest`.
+ """
+ kwargs.setdefault('prompt', 'consent')
+
+ self.redirect_uri = self._OOB_REDIRECT_URI
+
+ auth_url, _ = self.authorization_url(**kwargs)
+
+ return auth_url
+
+ def run_console_rest(self, code):
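+ """Completes the console flow (local modification): exchanges the code
+ obtained after :meth:`run_console` for user credentials."""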
+ self.fetch_token(code=code)
+ return self.credentials
+
+ def run_local_server(
+ self, host='localhost', port=8080,
+ authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
+ success_message=_DEFAULT_WEB_SUCCESS_MESSAGE,
+ open_browser=True,
+ **kwargs):
+ """Run the flow using the server strategy.
+
+ The server strategy instructs the user to open the authorization URL in
+ their browser and will attempt to automatically open the URL for them.
+ It will start a local web server to listen for the authorization
+ response. Once authorization is complete the authorization server will
+ redirect the user's browser to the local web server. The web server
+ will get the authorization code from the response and shutdown. The
+ code is then exchanged for a token.
+
+ Args:
+ host (str): The hostname for the local redirect server. This will
+ be served over http, not https.
+ port (int): The port for the local redirect server.
+ authorization_prompt_message (str): The message to display to tell
+ the user to navigate to the authorization URL.
+ success_message (str): The message to display in the web browser
+ when the authorization flow is complete.
+ open_browser (bool): Whether or not to open the authorization URL
+ in the user's browser.
+ kwargs: Additional keyword arguments passed through to
+ :meth:`authorization_url`.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
+ for the user.
+ """
+ wsgi_app = _RedirectWSGIApp(success_message)
+ local_server = wsgiref.simple_server.make_server(
+ host, port, wsgi_app, handler_class=_WSGIRequestHandler)
+
+ self.redirect_uri = 'http://{}:{}/'.format(
+ host, local_server.server_port)
+ auth_url, _ = self.authorization_url(**kwargs)
+
+ if open_browser:
+ webbrowser.open(auth_url, new=1, autoraise=True)
+
+ print(authorization_prompt_message.format(url=auth_url))
+
+ local_server.handle_request()
+
+ # Note: using https here because oauthlib is very picky that
+ # OAuth 2.0 should only occur over https.
+ authorization_response = wsgi_app.last_request_uri.replace(
+ 'http', 'https')
+ self.fetch_token(authorization_response=authorization_response)
+
+ return self.credentials
+
+
+class _WSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
+ """Custom WSGIRequestHandler.
+
+ Uses a named logger instead of printing to stderr.
+ """
+ def log_message(self, format, *args):
+ # pylint: disable=redefined-builtin
+ # (format is the argument name defined in the superclass.)
+ _LOGGER.info(format, *args)
+
+
+class _RedirectWSGIApp(object):
+ """WSGI app to handle the authorization redirect.
+
+ Stores the request URI and displays the given success message.
+ """
+
+ def __init__(self, success_message):
+ """
+ Args:
+ success_message (str): The message to display in the web browser
+ when the authorization flow is complete.
+ """
+ self.last_request_uri = None
+ self._success_message = success_message
+
+ def __call__(self, environ, start_response):
+ """WSGI Callable.
+
+ Args:
+ environ (Mapping[str, Any]): The WSGI environment.
+ start_response (Callable[str, list]): The WSGI start_response
+ callable.
+
+ Returns:
+ Iterable[bytes]: The response body.
+ """
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ self.last_request_uri = wsgiref.util.request_uri(environ)
+ return [self._success_message.encode('utf-8')]
diff --git a/google_auth_oauthlib/helpers.py b/google_auth_oauthlib/helpers.py
new file mode 100644
index 0000000..d32864d
--- /dev/null
+++ b/google_auth_oauthlib/helpers.py
@@ -0,0 +1,142 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Integration helpers.
+
+This module provides helpers for integrating with `requests-oauthlib`_.
+Typically, you'll want to use the higher-level helpers in
+:mod:`google_auth_oauthlib.flow`.
+
+.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
+"""
+
+import datetime
+import json
+
+import google.oauth2.credentials
+import requests_oauthlib
+
+_REQUIRED_CONFIG_KEYS = frozenset(('auth_uri', 'token_uri', 'client_id'))
+
+
+def session_from_client_config(client_config, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` from client
+ configuration loaded from a Google-format client secrets file.
+
+ Args:
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Raises:
+ ValueError: If the client configuration is not in the correct
+ format.
+
+ Returns:
+ Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
+ oauthlib session and the validated client configuration.
+
+ .. _client secrets:
+ https://developers.google.com/api-client-library/python/guide
+ /aaa_client_secrets
+ """
+
+ if 'web' in client_config:
+ config = client_config['web']
+ elif 'installed' in client_config:
+ config = client_config['installed']
+ else:
+ raise ValueError(
+ 'Client secrets must be for a web or installed app.')
+
+ if not _REQUIRED_CONFIG_KEYS.issubset(config.keys()):
+ raise ValueError('Client secrets is not in the correct format.')
+
+ session = requests_oauthlib.OAuth2Session(
+ client_id=config['client_id'],
+ scope=scopes,
+ **kwargs)
+
+ return session, client_config
+
+
+def session_from_client_secrets_file(client_secrets_file, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` instance from a
+ Google-format client secrets file.
+
+ Args:
+ client_secrets_file (str): The path to the `client secrets`_ .json
+ file.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
+ oauthlib session and the validated client configuration.
+
+ .. _client secrets:
+ https://developers.google.com/api-client-library/python/guide
+ /aaa_client_secrets
+ """
+ with open(client_secrets_file, 'r') as json_file:
+ client_config = json.load(json_file)
+
+ return session_from_client_config(client_config, scopes, **kwargs)
+
+
+def credentials_from_session(session, client_config=None):
+ """Creates :class:`google.oauth2.credentials.Credentials` from a
+ :class:`requests_oauthlib.OAuth2Session`.
+
+ :meth:`fetch_token` must be called on the session before calling
+ this. This uses the session's auth token and the provided client
+ configuration to create :class:`google.oauth2.credentials.Credentials`.
+ This allows you to use the credentials from the session with Google
+ API client libraries.
+
+ Args:
+ session (requests_oauthlib.OAuth2Session): The OAuth 2.0 session.
+ client_config (Mapping[str, Any]): The subset of the client
+ configuration to use. For example, if you have a web client
+ you would pass in `client_config['web']`.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: If there is no access token in the session.
+ """
+ client_config = client_config if client_config is not None else {}
+
+ if not session.token:
+ raise ValueError(
+ 'There is no access token for this session, did you call '
+ 'fetch_token?')
+
+ credentials = google.oauth2.credentials.Credentials(
+ session.token['access_token'],
+ refresh_token=session.token.get('refresh_token'),
+ id_token=session.token.get('id_token'),
+ token_uri=client_config.get('token_uri'),
+ client_id=client_config.get('client_id'),
+ client_secret=client_config.get('client_secret'),
+ scopes=session.scope)
+ credentials.expiry = datetime.datetime.utcfromtimestamp(
+ session.token['expires_at'])
+ return credentials
diff --git a/google_auth_oauthlib/interactive.py b/google_auth_oauthlib/interactive.py
new file mode 100644
index 0000000..c6a5d28
--- /dev/null
+++ b/google_auth_oauthlib/interactive.py
@@ -0,0 +1,105 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Get user credentials from interactive code environments.
+
+This module contains helpers for getting user credentials from interactive
+code environments installed on a development machine, such as Jupyter
+notebooks.
+"""
+
+from __future__ import absolute_import
+
+import google_auth_oauthlib.flow
+
+
+def get_user_credentials(scopes, client_id, client_secret):
+ """Gets credentials associated with your Google user account.
+
+ This function authenticates using your user credentials by going through
+ the OAuth 2.0 flow. You'll open a browser window to authenticate to your
+ Google account. The permissions it requests correspond to the scopes
+ you've provided.
+
+ To obtain the ``client_id`` and ``client_secret``, create an **OAuth
+ client ID** with application type **Other** from the `Credentials page on
+ the Google Developer's Console
+ <https://console.developers.google.com/apis/credentials>`_. Learn more
+ with the `Authenticating as an end user
+ <https://cloud.google.com/docs/authentication/end-user>`_ guide.
+
+ Args:
+ scopes (Sequence[str]):
+ A list of scopes to use when authenticating to Google APIs. See
+ the `list of OAuth 2.0 scopes for Google APIs
+ <https://developers.google.com/identity/protocols/googlescopes>`_.
+ client_id (str):
+ A string that identifies your application to Google APIs. Find
+ this value in the `Credentials page on the Google Developer's
+ Console
+ <https://console.developers.google.com/apis/credentials>`_.
+ client_secret (str):
+ A string that verifies your application to Google APIs. Find this
+ value in the `Credentials page on the Google Developer's Console
+ <https://console.developers.google.com/apis/credentials>`_.
+
+ Returns:
+ google.oauth2.credentials.Credentials:
+ The OAuth 2.0 credentials for the user.
+
+ Examples:
+ Get credentials for your user account and use them to run a query
+ with BigQuery::
+
+ import google_auth_oauthlib
+
+ # TODO: Create a client ID for your project.
+ client_id = "YOUR-CLIENT-ID.apps.googleusercontent.com"
+ client_secret = "abc_ThIsIsAsEcReT"
+
+ # TODO: Choose the needed scopes for your applications.
+ scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+
+ credentials = google_auth_oauthlib.get_user_credentials(
+ scopes, client_id, client_secret
+ )
+
+ # 1. Open the link.
+ # 2. Authorize the application to have access to your account.
+ # 3. Copy and paste the authorization code to the prompt.
+
+ # Use the credentials to construct a client for Google APIs.
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client(
+ credentials=credentials, project="your-project-id"
+ )
+ print(list(bigquery_client.query("SELECT 1").result()))
+ """
+
+ client_config = {
+ "installed": {
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.googleapis.com/token",
+ }
+ }
+
+ app_flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_config(
+ client_config, scopes=scopes
+ )
+
+ return app_flow.run_console()
diff --git a/google_auth_oauthlib/tool/__main__.py b/google_auth_oauthlib/tool/__main__.py
new file mode 100644
index 0000000..f4400cf
--- /dev/null
+++ b/google_auth_oauthlib/tool/__main__.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Command-line tool for obtaining authorization and credentials from a user.
+
+This tool uses the OAuth 2.0 Authorization Code grant as described in
+`section 1.3.1 of RFC6749`_ and implemented by
+:class:`google_auth_oauthlib.flow.Flow`.
+
+This tool is intended to assist developers in obtaining credentials
+for testing applications where it may not be possible or easy to run a
+complete OAuth 2.0 authorization flow, especially in the case of code
+samples or embedded devices without input / display capabilities.
+
+This is not intended for production use where a combination of
+companion and on-device applications should complete the OAuth 2.0
+authorization flow to get authorization from the users.
+
+.. _section 1.3.1 of RFC6749: https://tools.ietf.org/html/rfc6749#section-1.3.1
+"""
+
+import json
+import os
+import os.path
+
+import click
+
+import google_auth_oauthlib.flow
+
+
+APP_NAME = 'google-oauthlib-tool'
+DEFAULT_CREDENTIALS_FILENAME = 'credentials.json'
+
+
+@click.command()
+@click.option(
+ '--client-secrets',
+ metavar='<client_secret_json_file>',
+ required=True,
+ help='Path to OAuth2 client secret JSON file.')
+@click.option(
+ '--scope',
+ multiple=True,
+ metavar='<oauth2 scope>',
+ required=True,
+ help='API scopes to authorize access for.')
+@click.option(
+ '--save',
+ is_flag=True,
+ metavar='<save_mode>',
+ show_default=True,
+ default=False,
+ help='Save the credentials to file.')
+@click.option(
+ '--credentials',
+ metavar='<oauth2_credentials>',
+ show_default=True,
+ default=os.path.join(
+ click.get_app_dir(APP_NAME),
+ DEFAULT_CREDENTIALS_FILENAME
+ ),
+ help='Path to store OAuth2 credentials.')
+@click.option(
+ '--headless',
+ is_flag=True,
+ metavar='<headless_mode>',
+ show_default=True, default=False,
+ help='Run a console based flow.')
+def main(client_secrets, scope, save, credentials, headless):
+ """Command-line tool for obtaining authorization and credentials from a user.
+
+ This tool uses the OAuth 2.0 Authorization Code grant as described
+ in section 1.3.1 of RFC6749:
+ https://tools.ietf.org/html/rfc6749#section-1.3.1
+
+ This tool is intended to assist developers in obtaining credentials
+ for testing applications where it may not be possible or easy to run a
+ complete OAuth 2.0 authorization flow, especially in the case of code
+ samples or embedded devices without input / display capabilities.
+
+ This is not intended for production use where a combination of
+ companion and on-device applications should complete the OAuth 2.0
+ authorization flow to get authorization from the users.
+
+ """
+
+ flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
+ client_secrets,
+ scopes=scope
+ )
+
+ if not headless:
+ creds = flow.run_local_server()
+ else:
+ creds = flow.run_console()
+
+ creds_data = {
+ 'token': creds.token,
+ 'refresh_token': creds.refresh_token,
+ 'token_uri': creds.token_uri,
+ 'client_id': creds.client_id,
+ 'client_secret': creds.client_secret,
+ 'scopes': creds.scopes
+ }
+
+ if save:
+ del creds_data['token']
+
+ config_path = os.path.dirname(credentials)
+ if config_path and not os.path.isdir(config_path):
+ os.makedirs(config_path)
+
+ with open(credentials, 'w') as outfile:
+ json.dump(creds_data, outfile)
+
+ click.echo('credentials saved: %s' % credentials)
+
+ else:
+ click.echo(json.dumps(creds_data))
+
+
+if __name__ == '__main__':
+ # pylint doesn't realize that click has changed the function signature.
+ main() # pylint: disable=no-value-for-parameter
diff --git a/googleapiclient/__init__.py b/googleapiclient/__init__.py
new file mode 100644
index 0000000..dfb68b7
--- /dev/null
+++ b/googleapiclient/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "1.7.9"
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+logging.getLogger(__name__).addHandler(NullHandler())
diff --git a/googleapiclient/_auth.py b/googleapiclient/_auth.py
new file mode 100644
index 0000000..9d6d363
--- /dev/null
+++ b/googleapiclient/_auth.py
@@ -0,0 +1,147 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for authentication using oauth2client or google-auth."""
+
+import httplib2
+
+try:
+ import google.auth
+ import google.auth.credentials
+ HAS_GOOGLE_AUTH = True
+except ImportError: # pragma: NO COVER
+ HAS_GOOGLE_AUTH = False
+
+try:
+ import google_auth_httplib2
+except ImportError: # pragma: NO COVER
+ google_auth_httplib2 = None
+
+try:
+ import oauth2client
+ import oauth2client.client
+ HAS_OAUTH2CLIENT = True
+except ImportError: # pragma: NO COVER
+ HAS_OAUTH2CLIENT = False
+
+
+def default_credentials():
+ """Returns Application Default Credentials."""
+ if HAS_GOOGLE_AUTH:
+ credentials, _ = google.auth.default()
+ return credentials
+ elif HAS_OAUTH2CLIENT:
+ return oauth2client.client.GoogleCredentials.get_application_default()
+ else:
+ raise EnvironmentError(
+ 'No authentication library is available. Please install either '
+ 'google-auth or oauth2client.')
+
+
+def with_scopes(credentials, scopes):
+ """Scopes the credentials if necessary.
+
+ Args:
+ credentials (Union[
+ google.auth.credentials.Credentials,
+ oauth2client.client.Credentials]): The credentials to scope.
+ scopes (Sequence[str]): The list of scopes.
+
+ Returns:
+ Union[google.auth.credentials.Credentials,
+ oauth2client.client.Credentials]: The scoped credentials.
+ """
+ if HAS_GOOGLE_AUTH and isinstance(
+ credentials, google.auth.credentials.Credentials):
+ return google.auth.credentials.with_scopes_if_required(
+ credentials, scopes)
+ else:
+ try:
+ if credentials.create_scoped_required():
+ return credentials.create_scoped(scopes)
+ else:
+ return credentials
+ except AttributeError:
+ return credentials
+
+
+def authorized_http(credentials):
+ """Returns an http client that is authorized with the given credentials.
+
+ Args:
+ credentials (Union[
+ google.auth.credentials.Credentials,
+ oauth2client.client.Credentials]): The credentials to use.
+
+ Returns:
+ Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
+ authorized http client.
+ """
+ from googleapiclient.http import build_http
+
+ if HAS_GOOGLE_AUTH and isinstance(
+ credentials, google.auth.credentials.Credentials):
+ if google_auth_httplib2 is None:
+ raise ValueError(
+ 'Credentials from google.auth specified, but '
+ 'google-api-python-client is unable to use these credentials '
+ 'unless google-auth-httplib2 is installed. Please install '
+ 'google-auth-httplib2.')
+ return google_auth_httplib2.AuthorizedHttp(credentials,
+ http=build_http())
+ else:
+ return credentials.authorize(build_http())
+
+
+def refresh_credentials(credentials):
+ # Refresh must use a new http instance, as the one associated with the
+ # credentials could be an AuthorizedHttp or an oauth2client-decorated
+ # Http instance which would cause a weird recursive loop of refreshing
+ # and likely tear a hole in spacetime.
+ refresh_http = httplib2.Http()
+ if HAS_GOOGLE_AUTH and isinstance(
+ credentials, google.auth.credentials.Credentials):
+ request = google_auth_httplib2.Request(refresh_http)
+ return credentials.refresh(request)
+ else:
+ return credentials.refresh(refresh_http)
+
+
+def apply_credentials(credentials, headers):
+ # oauth2client and google-auth have the same interface for this.
+ if not is_valid(credentials):
+ refresh_credentials(credentials)
+ return credentials.apply(headers)
+
+
+def is_valid(credentials):
+ if HAS_GOOGLE_AUTH and isinstance(
+ credentials, google.auth.credentials.Credentials):
+ return credentials.valid
+ else:
+ return (
+ credentials.access_token is not None and
+ not credentials.access_token_expired)
+
+
+def get_credentials_from_http(http):
+ if http is None:
+ return None
+ elif hasattr(http.request, 'credentials'):
+ return http.request.credentials
+ elif (hasattr(http, 'credentials')
+ and not isinstance(http.credentials, httplib2.Credentials)):
+ return http.credentials
+ else:
+ return None
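Taken together, these helpers form the acquire → scope → authorize pipeline used by discovery.py below. A sketch under the assumption that google-auth and google-auth-httplib2 are installed and Application Default Credentials are configured (e.g. via GOOGLE_APPLICATION_CREDENTIALS); the scope is illustrative:

    from googleapiclient import _auth

    credentials = _auth.default_credentials()
    credentials = _auth.with_scopes(
        credentials, ['https://www.googleapis.com/auth/drive.readonly'])

    # AuthorizedHttp for google-auth credentials, or an
    # oauth2client-authorized httplib2.Http otherwise.
    http = _auth.authorized_http(credentials)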
diff --git a/googleapiclient/_helpers.py b/googleapiclient/_helpers.py
new file mode 100644
index 0000000..5e8184b
--- /dev/null
+++ b/googleapiclient/_helpers.py
@@ -0,0 +1,204 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for commonly used utilities."""
+
+import functools
+import inspect
+import logging
+import warnings
+
+import six
+from six.moves import urllib
+
+
+logger = logging.getLogger(__name__)
+
+POSITIONAL_WARNING = 'WARNING'
+POSITIONAL_EXCEPTION = 'EXCEPTION'
+POSITIONAL_IGNORE = 'IGNORE'
+POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
+ POSITIONAL_IGNORE])
+
+positional_parameters_enforcement = POSITIONAL_WARNING
+
+_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
+_IS_DIR_MESSAGE = '{0}: Is a directory'
+_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
+
+
+def positional(max_positional_args):
+ """A decorator to declare that only the first N arguments my be positional.
+
+ This decorator makes it easy to support Python 3 style keyword-only
+ parameters. For example, in Python 3 it is possible to write::
+
+ def fn(pos1, *, kwonly1=None, kwonly2=None):
+ ...
+
+ All named parameters after ``*`` must be passed as keyword arguments::
+
+ fn(10, 'kw1', 'kw2') # Raises exception.
+ fn(10, kwonly1='kw1') # Ok.
+
+ Example
+ ^^^^^^^
+
+ To define a function like above, do::
+
+ @positional(1)
+ def fn(pos1, kwonly1=None, kwonly2=None):
+ ...
+
+ If no default value is provided to a keyword argument, it becomes a
+ required keyword argument::
+
+ @positional(0)
+ def fn(required_kw):
+ ...
+
+ This must be called with the keyword parameter::
+
+ fn() # Raises exception.
+ fn(10) # Raises exception.
+ fn(required_kw=10) # Ok.
+
+ When defining instance or class methods always remember to account for
+ ``self`` and ``cls``::
+
+ class MyClass(object):
+
+ @positional(2)
+ def my_method(self, pos1, kwonly1=None):
+ ...
+
+ @classmethod
+ @positional(2)
+ def my_class_method(cls, pos1, kwonly1=None):
+ ...
+
+ The positional decorator behavior is controlled by
+ ``_helpers.positional_parameters_enforcement``, which may be set to
+ ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
+ ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
+ nothing, respectively, if a declaration is violated.
+
+ Args:
+ max_positional_args: Maximum number of positional arguments. All
+ parameters after this index must be
+ keyword only.
+
+ Returns:
+ A decorator that prevents arguments after max_positional_args from
+ being used as positional parameters.
+
+ Raises:
+ TypeError: if a keyword-only argument is provided as a positional
+ parameter, but only if
+ _helpers.positional_parameters_enforcement is set to
+ POSITIONAL_EXCEPTION.
+ """
+
+ def positional_decorator(wrapped):
+ @functools.wraps(wrapped)
+ def positional_wrapper(*args, **kwargs):
+ if len(args) > max_positional_args:
+ plural_s = ''
+ if max_positional_args != 1:
+ plural_s = 's'
+ message = ('{function}() takes at most {args_max} positional '
+ 'argument{plural} ({args_given} given)'.format(
+ function=wrapped.__name__,
+ args_max=max_positional_args,
+ args_given=len(args),
+ plural=plural_s))
+ if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
+ raise TypeError(message)
+ elif positional_parameters_enforcement == POSITIONAL_WARNING:
+ logger.warning(message)
+ return wrapped(*args, **kwargs)
+ return positional_wrapper
+
+ if isinstance(max_positional_args, six.integer_types):
+ return positional_decorator
+ else:
+ args, _, _, defaults = inspect.getargspec(max_positional_args)
+ return positional(len(args) - len(defaults or ()))(max_positional_args)
+
+
+def parse_unique_urlencoded(content):
+ """Parses unique key-value parameters from urlencoded content.
+
+ Args:
+ content: string, URL-encoded key-value pairs.
+
+ Returns:
+ dict, The key-value pairs from ``content``.
+
+ Raises:
+ ValueError: if one of the keys is repeated.
+ """
+ urlencoded_params = urllib.parse.parse_qs(content)
+ params = {}
+ for key, value in six.iteritems(urlencoded_params):
+ if len(value) != 1:
+ msg = ('URL-encoded content contains a repeated value: '
+ '%s -> %s' % (key, ', '.join(value)))
+ raise ValueError(msg)
+ params[key] = value[0]
+ return params
+
+
+def update_query_params(uri, params):
+ """Updates a URI with new query parameters.
+
+ If a given key from ``params`` is repeated in the ``uri``, then
+ the URI will be considered invalid and an error will occur.
+
+ If the URI is valid, then each value from ``params`` will
+ replace the corresponding value in the query parameters (if
+ it exists).
+
+ Args:
+ uri: string, A valid URI, with potential existing query parameters.
+ params: dict, A dictionary of query parameters.
+
+ Returns:
+ The same URI but with the new query parameters added.
+ """
+ parts = urllib.parse.urlparse(uri)
+ query_params = parse_unique_urlencoded(parts.query)
+ query_params.update(params)
+ new_query = urllib.parse.urlencode(query_params)
+ new_parts = parts._replace(query=new_query)
+ return urllib.parse.urlunparse(new_parts)
+
+
+def _add_query_parameter(url, name, value):
+ """Adds a query parameter to a url.
+
+ Replaces the current value if it already exists in the URL.
+
+ Args:
+ url: string, url to add the query parameter to.
+ name: string, query parameter name.
+ value: string, query parameter value.
+
+ Returns:
+ The url with the query parameter added; the url is unchanged if value is None.
+ """
+ if value is None:
+ return url
+ else:
+ return update_query_params(url, {name: value})
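A quick sketch of the two main utilities in this module; fetch() and the URLs are illustrative:

    from googleapiclient import _helpers

    @_helpers.positional(1)
    def fetch(url, timeout=None):
        return url, timeout

    fetch('https://example.com', timeout=5)  # OK
    fetch('https://example.com', 5)          # warns by default; raises under
                                             # POSITIONAL_EXCEPTION enforcement

    _helpers.update_query_params('https://example.com/p?a=1', {'b': '2'})
    # -> 'https://example.com/p?a=1&b=2' (parameter order may vary)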
diff --git a/googleapiclient/channel.py b/googleapiclient/channel.py
new file mode 100644
index 0000000..3caee13
--- /dev/null
+++ b/googleapiclient/channel.py
@@ -0,0 +1,301 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Channel notifications support.
+
+Classes and functions to support channel subscriptions and notifications
+on those channels.
+
+Notes:
+ - This code is based on experimental APIs and is subject to change.
+ - Notification does not deduplicate notification ids; that's up to
+ the receiver.
+ - Storing the Channel between calls is up to the caller.
+
+
+Example setting up a channel:
+
+ # Create a new channel that gets notifications via webhook.
+ channel = new_webhook_channel("https://example.com/my_web_hook")
+
+ # Store the channel, keyed by 'channel.id'. Store it before calling the
+ # watch method because notifications may start arriving before the watch
+ # method returns.
+ ...
+
+ resp = service.objects().watchAll(
+ bucket="some_bucket_id", body=channel.body()).execute()
+ channel.update(resp)
+
+ # Store the channel, keyed by 'channel.id'. Store it after being updated
+ # since the resource_id value will now be correct, and that's needed to
+ # stop a subscription.
+ ...
+
+
+An example Webhook implementation using webapp2. Note that webapp2 puts
+headers in a case-insensitive dictionary, as headers aren't guaranteed to
+always be upper case.
+
+ id = self.request.headers[X_GOOG_CHANNEL_ID]
+
+ # Retrieve the channel by id.
+ channel = ...
+
+ # Parse notification from the headers, including validating the id.
+ n = notification_from_headers(channel, self.request.headers)
+
+ # Do app specific stuff with the notification here.
+ if n.state == 'sync':
+ # Code to handle sync state.
+ elif n.state == 'exists':
+ # Code to handle the exists state.
+ elif n.state == 'not_exists':
+ # Code to handle the not exists state.
+
+
+Example of unsubscribing.
+
+ service.channels().stop(channel.body()).execute()
+"""
+from __future__ import absolute_import
+
+import datetime
+import uuid
+
+from googleapiclient import errors
+from googleapiclient import _helpers as util
+import six
+
+
+# The unix time epoch starts at midnight on January 1, 1970 (UTC).
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+
+# Map the names of the parameters in the JSON channel description to
+# the parameter names we use in the Channel class.
+CHANNEL_PARAMS = {
+ 'address': 'address',
+ 'id': 'id',
+ 'expiration': 'expiration',
+ 'params': 'params',
+ 'resourceId': 'resource_id',
+ 'resourceUri': 'resource_uri',
+ 'type': 'type',
+ 'token': 'token',
+ }
+
+X_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'
+X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
+X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
+X_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'
+X_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'
+
+
+def _upper_header_keys(headers):
+ new_headers = {}
+ for k, v in six.iteritems(headers):
+ new_headers[k.upper()] = v
+ return new_headers
+
+
+class Notification(object):
+ """A Notification from a Channel.
+
+ Notifications are not usually constructed directly, but are returned
+ from functions like notification_from_headers().
+
+ Attributes:
+ message_number: int, The unique id number of this notification.
+ state: str, The state of the resource being monitored.
+ resource_uri: str, The address of the resource being monitored.
+ resource_id: str, The unique identifier of the version of the resource at
+ this event.
+ """
+ @util.positional(5)
+ def __init__(self, message_number, state, resource_uri, resource_id):
+ """Notification constructor.
+
+ Args:
+ message_number: int, The unique id number of this notification.
+ state: str, The state of the resource being monitored. Can be one
+ of "exists", "not_exists", or "sync".
+ resource_uri: str, The address of the resource being monitored.
+ resource_id: str, The identifier of the watched resource.
+ """
+ self.message_number = message_number
+ self.state = state
+ self.resource_uri = resource_uri
+ self.resource_id = resource_id
+
+
+class Channel(object):
+ """A Channel for notifications.
+
+ Usually not constructed directly, instead it is returned from helper
+ functions like new_webhook_channel().
+
+ Attributes:
+ type: str, The type of delivery mechanism used by this channel. For
+ example, 'web_hook'.
+ id: str, A UUID for the channel.
+ token: str, An arbitrary string associated with the channel that
+ is delivered to the target address with each event delivered
+ over this channel.
+ address: str, The address of the receiving entity where events are
+ delivered. Specific to the channel type.
+ expiration: int, The time, in milliseconds from the epoch, when this
+ channel will expire.
+ params: dict, A dictionary of string to string, with additional parameters
+ controlling delivery channel behavior.
+ resource_id: str, An opaque id that identifies the resource that is
+ being watched. Stable across different API versions.
+ resource_uri: str, The canonicalized ID of the watched resource.
+ """
+
+ @util.positional(5)
+ def __init__(self, type, id, token, address, expiration=None,
+ params=None, resource_id="", resource_uri=""):
+ """Create a new Channel.
+
+ In user code, this Channel constructor will not typically be called
+ manually since there are functions for creating channels for each specific
+ type with a more customized set of arguments to pass.
+
+ Args:
+ type: str, The type of delivery mechanism used by this channel. For
+ example, 'web_hook'.
+ id: str, A UUID for the channel.
+ token: str, An arbitrary string associated with the channel that
+ is delivered to the target address with each event delivered
+ over this channel.
+ address: str, The address of the receiving entity where events are
+ delivered. Specific to the channel type.
+ expiration: int, The time, in milliseconds from the epoch, when this
+ channel will expire.
+ params: dict, A dictionary of string to string, with additional parameters
+ controlling delivery channel behavior.
+ resource_id: str, An opaque id that identifies the resource that is
+ being watched. Stable across different API versions.
+ resource_uri: str, The canonicalized ID of the watched resource.
+ """
+ self.type = type
+ self.id = id
+ self.token = token
+ self.address = address
+ self.expiration = expiration
+ self.params = params
+ self.resource_id = resource_id
+ self.resource_uri = resource_uri
+
+ def body(self):
+ """Build a body from the Channel.
+
+ Constructs a dictionary that's appropriate for passing into watch()
+ methods as the value of the body argument.
+
+ Returns:
+ A dictionary representation of the channel.
+ """
+ result = {
+ 'id': self.id,
+ 'token': self.token,
+ 'type': self.type,
+ 'address': self.address
+ }
+ if self.params:
+ result['params'] = self.params
+ if self.resource_id:
+ result['resourceId'] = self.resource_id
+ if self.resource_uri:
+ result['resourceUri'] = self.resource_uri
+ if self.expiration:
+ result['expiration'] = self.expiration
+
+ return result
+
+ def update(self, resp):
+ """Update a channel with information from the response of watch().
+
+ When a request is sent to watch() a resource, the response returned
+ from the watch() request is a dictionary with updated channel information,
+ such as the resource_id, which is needed when stopping a subscription.
+
+ Args:
+ resp: dict, The response from a watch() method.
+ """
+ for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
+ value = resp.get(json_name)
+ if value is not None:
+ setattr(self, param_name, value)
+
+
+def notification_from_headers(channel, headers):
+ """Parse a notification from the webhook request headers, validate
+ the notification, and return a Notification object.
+
+ Args:
+ channel: Channel, The channel that the notification is associated with.
+ headers: dict, A dictionary like object that contains the request headers
+ from the webhook HTTP request.
+
+ Returns:
+ A Notification object.
+
+ Raises:
+ errors.InvalidNotificationError if the notification is invalid.
+ ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
+ """
+ headers = _upper_header_keys(headers)
+ channel_id = headers[X_GOOG_CHANNEL_ID]
+ if channel.id != channel_id:
+ raise errors.InvalidNotificationError(
+ 'Channel id mismatch: %s != %s' % (channel.id, channel_id))
+ else:
+ message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
+ state = headers[X_GOOG_RESOURCE_STATE]
+ resource_uri = headers[X_GOOG_RESOURCE_URI]
+ resource_id = headers[X_GOOG_RESOURCE_ID]
+ return Notification(message_number, state, resource_uri, resource_id)
+
+
+@util.positional(2)
+def new_webhook_channel(url, token=None, expiration=None, params=None):
+ """Create a new webhook Channel.
+
+ Args:
+ url: str, URL to post notifications to.
+ token: str, An arbitrary string associated with the channel that
+ is delivered to the target address with each notification delivered
+ over this channel.
+ expiration: datetime.datetime, A time in the future when the channel
+ should expire. Can also be None if the subscription should use the
+ default expiration. Note that different services may have different
+ limits on how long a subscription lasts. Check the response from the
+ watch() method to see the value the service has set for an expiration
+ time.
+ params: dict, Extra parameters to pass on channel creation. Currently
+ not used for webhook channels.
+ """
+ expiration_ms = 0
+ if expiration:
+ delta = expiration - EPOCH
+ expiration_ms = delta.microseconds/1000 + (
+ delta.seconds + delta.days*24*3600)*1000
+ if expiration_ms < 0:
+ expiration_ms = 0
+
+ return Channel('web_hook', str(uuid.uuid4()),
+ token, url, expiration=expiration_ms,
+ params=params)
+
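A sketch of the watch/notify round trip described in the module docstring; the watch() call itself is elided and the header values are illustrative:

    import datetime
    from googleapiclient import channel

    ch = channel.new_webhook_channel(
        'https://example.com/my_web_hook',
        expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    body = ch.body()  # pass as body= to a watch() method, then ch.update(resp)

    headers = {
        'X-Goog-Channel-Id': ch.id,  # keys are upper-cased before lookup
        'X-Goog-Message-Number': '1',
        'X-Goog-Resource-State': 'sync',
        'X-Goog-Resource-Uri': 'https://example.com/resource',
        'X-Goog-Resource-Id': 'abc123',
    }
    n = channel.notification_from_headers(ch, headers)
    assert n.state == 'sync' and n.message_number == 1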
diff --git a/googleapiclient/discovery.py b/googleapiclient/discovery.py
new file mode 100644
index 0000000..7d895bb
--- /dev/null
+++ b/googleapiclient/discovery.py
@@ -0,0 +1,1191 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for discovery based APIs.
+
+A client library for Google's discovery based APIs.
+"""
+from __future__ import absolute_import
+import six
+from six.moves import zip
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__all__ = [
+ 'build',
+ 'build_from_document',
+ 'fix_method_name',
+ 'key2param',
+ ]
+
+from six import BytesIO
+from six.moves import http_client
+from six.moves.urllib.parse import urlencode, urlparse, urljoin, \
+ urlunparse, parse_qsl
+
+# Standard library imports
+import copy
+try:
+ from email.generator import BytesGenerator
+except ImportError:
+ from email.generator import Generator as BytesGenerator
+from email.mime.multipart import MIMEMultipart
+from email.mime.nonmultipart import MIMENonMultipart
+import json
+import keyword
+import logging
+import mimetypes
+import os
+import re
+
+# Third-party imports
+import httplib2
+import uritemplate
+
+# Local imports
+from googleapiclient import _auth
+from googleapiclient import mimeparse
+from googleapiclient.errors import HttpError
+from googleapiclient.errors import InvalidJsonError
+from googleapiclient.errors import MediaUploadSizeError
+from googleapiclient.errors import UnacceptableMimeTypeError
+from googleapiclient.errors import UnknownApiNameOrVersion
+from googleapiclient.errors import UnknownFileType
+from googleapiclient.http import build_http
+from googleapiclient.http import BatchHttpRequest
+from googleapiclient.http import HttpMock
+from googleapiclient.http import HttpMockSequence
+from googleapiclient.http import HttpRequest
+from googleapiclient.http import MediaFileUpload
+from googleapiclient.http import MediaUpload
+from googleapiclient.model import JsonModel
+from googleapiclient.model import MediaModel
+from googleapiclient.model import RawModel
+from googleapiclient.schema import Schemas
+
+from googleapiclient._helpers import _add_query_parameter
+from googleapiclient._helpers import positional
+
+
+# The client library requires a version of httplib2 that supports RETRIES.
+httplib2.RETRIES = 1
+
+logger = logging.getLogger(__name__)
+
+URITEMPLATE = re.compile('{[^}]*}')
+VARNAME = re.compile('[a-zA-Z0-9_-]+')
+DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
+ '{api}/{apiVersion}/rest')
+V1_DISCOVERY_URI = DISCOVERY_URI
+V2_DISCOVERY_URI = ('https://{api}.googleapis.com/$discovery/rest?'
+ 'version={apiVersion}')
+DEFAULT_METHOD_DOC = 'A description of how to use this function'
+HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
+
+_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
+BODY_PARAMETER_DEFAULT_VALUE = {
+ 'description': 'The request body.',
+ 'type': 'object',
+ 'required': True,
+}
+MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
+ 'description': ('The filename of the media request body, or an instance '
+ 'of a MediaUpload object.'),
+ 'type': 'string',
+ 'required': False,
+}
+MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE = {
+ 'description': ('The MIME type of the media request body, used when '
+ 'media_body is a filename rather than a MediaUpload object.'),
+ 'type': 'string',
+ 'required': False,
+}
+_PAGE_TOKEN_NAMES = ('pageToken', 'nextPageToken')
+
+# Parameters accepted by the stack, but not visible via discovery.
+# TODO(dhermes): Remove 'userip' in 'v2'.
+STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
+STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
+
+# Library-specific reserved words beyond Python keywords.
+RESERVED_WORDS = frozenset(['body'])
+
+# patch _write_lines to avoid munging '\r' into '\n'
+# ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )
+class _BytesGenerator(BytesGenerator):
+ _write_lines = BytesGenerator.write
+
+def fix_method_name(name):
+ """Fix method names to avoid '$' characters and reserved word conflicts.
+
+ Args:
+ name: string, method name.
+
+ Returns:
+ The name with '_' appended if the name is a reserved word and '$'
+ replaced with '_'.
+ """
+ name = name.replace('$', '_')
+ if keyword.iskeyword(name) or name in RESERVED_WORDS:
+ return name + '_'
+ else:
+ return name
+
+
+def key2param(key):
+ """Converts key names into parameter names.
+
+ For example, converting "max-results" -> "max_results"
+
+ Args:
+ key: string, the method key name.
+
+ Returns:
+ A safe method name based on the key name.
+ """
+ result = []
+ key = list(key)
+ if not key[0].isalpha():
+ result.append('x')
+ for c in key:
+ if c.isalnum():
+ result.append(c)
+ else:
+ result.append('_')
+
+ return ''.join(result)
+
+
+@positional(2)
+def build(serviceName,
+ version,
+ http=None,
+ discoveryServiceUrl=DISCOVERY_URI,
+ developerKey=None,
+ model=None,
+ requestBuilder=HttpRequest,
+ credentials=None,
+ cache_discovery=True,
+ cache=None):
+ """Construct a Resource for interacting with an API.
+
+ Construct a Resource object for interacting with an API. The serviceName and
+ version are the names from the Discovery service.
+
+ Args:
+ serviceName: string, name of the service.
+ version: string, the version of the service.
+ http: httplib2.Http, An instance of httplib2.Http or something that acts
+ like it that HTTP requests will be made through.
+ discoveryServiceUrl: string, a URI Template that points to the location of
+ the discovery service. It should have two parameters {api} and
+ {apiVersion} that when filled in produce an absolute URI to the discovery
+ document for that service.
+ developerKey: string, key obtained from
+ https://code.google.com/apis/console.
+ model: googleapiclient.Model, converts to and from the wire format.
+ requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP
+ request.
+ credentials: oauth2client.Credentials or
+ google.auth.credentials.Credentials, credentials to be used for
+ authentication.
+ cache_discovery: Boolean, whether or not to cache the discovery doc.
+ cache: googleapiclient.discovery_cache.base.CacheBase, an optional
+ cache object for the discovery documents.
+
+ Returns:
+ A Resource object with methods for interacting with the service.
+ """
+ params = {
+ 'api': serviceName,
+ 'apiVersion': version
+ }
+
+ if http is None:
+ discovery_http = build_http()
+ else:
+ discovery_http = http
+
+ for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI,):
+ requested_url = uritemplate.expand(discovery_url, params)
+
+ try:
+ content = _retrieve_discovery_doc(
+ requested_url, discovery_http, cache_discovery, cache, developerKey)
+ return build_from_document(content, base=discovery_url, http=http,
+ developerKey=developerKey, model=model, requestBuilder=requestBuilder,
+ credentials=credentials)
+ except HttpError as e:
+ if e.resp.status == http_client.NOT_FOUND:
+ continue
+ else:
+ raise e
+
+ raise UnknownApiNameOrVersion(
+ "name: %s version: %s" % (serviceName, version))
+
+
+def _retrieve_discovery_doc(url, http, cache_discovery, cache=None,
+ developerKey=None):
+ """Retrieves the discovery_doc from cache or the internet.
+
+ Args:
+ url: string, the URL of the discovery document.
+ http: httplib2.Http, An instance of httplib2.Http or something that acts
+ like it through which HTTP requests will be made.
+ cache_discovery: Boolean, whether or not to cache the discovery doc.
+ cache: googleapiclient.discovery_cache.base.Cache, an optional cache
+ object for the discovery documents.
+ developerKey: string, API key appended to the discovery request.
+
+ Returns:
+ A unicode string representation of the discovery document.
+ """
+ if cache_discovery:
+ from . import discovery_cache
+ from .discovery_cache import base
+ if cache is None:
+ cache = discovery_cache.autodetect()
+ if cache:
+ content = cache.get(url)
+ if content:
+ return content
+
+ actual_url = url
+ # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
+ # variable that contains the network address of the client sending the
+ # request. If it exists then add that to the request for the discovery
+ # document to avoid exceeding the quota on discovery requests.
+ if 'REMOTE_ADDR' in os.environ:
+ actual_url = _add_query_parameter(url, 'userIp', os.environ['REMOTE_ADDR'])
+ if developerKey:
+ actual_url = _add_query_parameter(actual_url, 'key', developerKey)
+ logger.info('URL being requested: GET %s', actual_url)
+
+ resp, content = http.request(actual_url)
+
+ if resp.status >= 400:
+ raise HttpError(resp, content, uri=actual_url)
+
+ try:
+ content = content.decode('utf-8')
+ except AttributeError:
+ pass
+
+ try:
+ service = json.loads(content)
+ except ValueError as e:
+ logger.error('Failed to parse as JSON: ' + content)
+ raise InvalidJsonError()
+ if cache_discovery and cache:
+ cache.set(url, content)
+ return content
+
+
+@positional(1)
+def build_from_document(
+ service,
+ base=None,
+ future=None,
+ http=None,
+ developerKey=None,
+ model=None,
+ requestBuilder=HttpRequest,
+ credentials=None):
+ """Create a Resource for interacting with an API.
+
+ Same as `build()`, but constructs the Resource object from a discovery
+ document that it is given, as opposed to retrieving one over HTTP.
+
+ Args:
+ service: string or object, the JSON discovery document describing the API.
+ The value passed in may either be the JSON string or the deserialized
+ JSON.
+ base: string, base URI for all HTTP requests, usually the discovery URI.
+ This parameter is no longer used as rootUrl and servicePath are included
+ within the discovery document. (deprecated)
+ future: string, discovery document with future capabilities (deprecated).
+ http: httplib2.Http, An instance of httplib2.Http or something that acts
+ like it that HTTP requests will be made through.
+ developerKey: string, Key for controlling API usage, generated
+ from the API Console.
+ model: Model class instance that serializes and de-serializes requests and
+ responses.
+ requestBuilder: Takes an http request and packages it up to be executed.
+ credentials: oauth2client.Credentials or
+ google.auth.credentials.Credentials, credentials to be used for
+ authentication.
+
+ Returns:
+ A Resource object with methods for interacting with the service.
+ """
+
+ if http is not None and credentials is not None:
+ raise ValueError('Arguments http and credentials are mutually exclusive.')
+
+ if isinstance(service, six.string_types):
+ service = json.loads(service)
+
+ if 'rootUrl' not in service and (isinstance(http, (HttpMock,
+ HttpMockSequence))):
+ logger.error("You are using HttpMock or HttpMockSequence without" +
+ "having the service discovery doc in cache. Try calling " +
+ "build() without mocking once first to populate the " +
+ "cache.")
+ raise InvalidJsonError()
+
+ base = urljoin(service['rootUrl'], service['servicePath'])
+ schema = Schemas(service)
+
+ # If the http client is not specified, then we must construct an http client
+ # to make requests. If the service has scopes, then we also need to set up
+ # authentication.
+ if http is None:
+ # Does the service require scopes?
+ scopes = list(
+ service.get('auth', {}).get('oauth2', {}).get('scopes', {}).keys())
+
+ # If so, then we need to set up authentication if no developerKey is
+ # specified.
+ if scopes and not developerKey:
+ # If the user didn't pass in credentials, attempt to acquire application
+ # default credentials.
+ if credentials is None:
+ credentials = _auth.default_credentials()
+
+ # The credentials need to be scoped.
+ credentials = _auth.with_scopes(credentials, scopes)
+
+ # If credentials are provided, create an authorized http instance;
+ # otherwise, skip authentication.
+ if credentials:
+ http = _auth.authorized_http(credentials)
+
+ # If the service doesn't require scopes then there is no need for
+ # authentication.
+ else:
+ http = build_http()
+
+ if model is None:
+ features = service.get('features', [])
+ model = JsonModel('dataWrapper' in features)
+
+ return Resource(http=http, baseUrl=base, model=model,
+ developerKey=developerKey, requestBuilder=requestBuilder,
+ resourceDesc=service, rootDesc=service, schema=schema)
+
+
+def _cast(value, schema_type):
+ """Convert value to a string based on JSON Schema type.
+
+ See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
+ JSON Schema.
+
+ Args:
+ value: any, the value to convert
+ schema_type: string, the type that value should be interpreted as
+
+ Returns:
+ A string representation of 'value' based on the schema_type.
+ """
+ if schema_type == 'string':
+ if isinstance(value, six.string_types):
+ return value
+ else:
+ return str(value)
+ elif schema_type == 'integer':
+ return str(int(value))
+ elif schema_type == 'number':
+ return str(float(value))
+ elif schema_type == 'boolean':
+ return str(bool(value)).lower()
+ else:
+ if isinstance(value, six.string_types):
+ return value
+ else:
+ return str(value)
+
+
+def _media_size_to_long(maxSize):
+ """Convert a string media size, such as 10GB or 3TB into an integer.
+
+ Args:
+ maxSize: string, size as a string, such as 2MB or 7GB.
+
+ Returns:
+ The size as an integer value.
+ """
+ if len(maxSize) < 2:
+ return 0
+ units = maxSize[-2:].upper()
+ bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
+ if bit_shift is not None:
+ return int(maxSize[:-2]) << bit_shift
+ else:
+ return int(maxSize)
+
+
+def _media_path_url_from_info(root_desc, path_url):
+ """Creates an absolute media path URL.
+
+ Constructed using the API root URI and service path from the discovery
+ document and the relative path for the API method.
+
+ Args:
+ root_desc: Dictionary; the entire original deserialized discovery document.
+ path_url: String; the relative URL for the API method. Relative to the API
+ root, which is specified in the discovery document.
+
+ Returns:
+ String; the absolute URI for media upload for the API method.
+ """
+ return '%(root)supload/%(service_path)s%(path)s' % {
+ 'root': root_desc['rootUrl'],
+ 'service_path': root_desc['servicePath'],
+ 'path': path_url,
+ }
+
+
+def _fix_up_parameters(method_desc, root_desc, http_method, schema):
+ """Updates parameters of an API method with values specific to this library.
+
+ Specifically, adds whatever global parameters are specified by the API to the
+ parameters for the individual method. Also adds parameters which don't
+ appear in the discovery document, but are available to all discovery based
+ APIs (these are listed in STACK_QUERY_PARAMETERS).
+
+ SIDE EFFECTS: This updates the parameters dictionary object in the method
+ description.
+
+ Args:
+ method_desc: Dictionary with metadata describing an API method. Value comes
+ from the dictionary of methods stored in the 'methods' key in the
+ deserialized discovery document.
+ root_desc: Dictionary; the entire original deserialized discovery document.
+ http_method: String; the HTTP method used to call the API method described
+ in method_desc.
+ schema: Object, mapping of schema names to schema descriptions.
+
+ Returns:
+ The updated Dictionary stored in the 'parameters' key of the method
+ description dictionary.
+ """
+ parameters = method_desc.setdefault('parameters', {})
+
+ # Add in the parameters common to all methods.
+ for name, description in six.iteritems(root_desc.get('parameters', {})):
+ parameters[name] = description
+
+ # Add in undocumented query parameters.
+ for name in STACK_QUERY_PARAMETERS:
+ parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
+
+ # Add 'body' (our own reserved word) to parameters if the method supports
+ # a request payload.
+ if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
+ body = BODY_PARAMETER_DEFAULT_VALUE.copy()
+ body.update(method_desc['request'])
+ # Make body optional for requests with no parameters.
+ if not _methodProperties(method_desc, schema, 'request'):
+ body['required'] = False
+ parameters['body'] = body
+
+ return parameters
+
+
+def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
+ """Adds 'media_body' and 'media_mime_type' parameters if supported by method.
+
+ SIDE EFFECTS: If the method supports media upload and has a required body,
+ sets body to be optional (required=False) instead. Also, if there is a
+ 'mediaUpload' in the method description, adds 'media_upload' key to
+ parameters.
+
+ Args:
+ method_desc: Dictionary with metadata describing an API method. Value comes
+ from the dictionary of methods stored in the 'methods' key in the
+ deserialized discovery document.
+ root_desc: Dictionary; the entire original deserialized discovery document.
+ path_url: String; the relative URL for the API method. Relative to the API
+ root, which is specified in the discovery document.
+ parameters: A dictionary describing method parameters for method described
+ in method_desc.
+
+ Returns:
+ Triple (accept, max_size, media_path_url) where:
+ - accept is a list of strings representing what content types are
+ accepted for media upload. Defaults to empty list if not in the
+ discovery document.
+ - max_size is a long representing the max size in bytes allowed for a
+ media upload. Defaults to 0L if not in the discovery document.
+ - media_path_url is a String; the absolute URI for media upload for the
+ API method. Constructed using the API root URI and service path from
+ the discovery document and the relative path for the API method. If
+ media upload is not supported, this is None.
+ """
+ media_upload = method_desc.get('mediaUpload', {})
+ accept = media_upload.get('accept', [])
+ max_size = _media_size_to_long(media_upload.get('maxSize', ''))
+ media_path_url = None
+
+ if media_upload:
+ media_path_url = _media_path_url_from_info(root_desc, path_url)
+ parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
+ parameters['media_mime_type'] = MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE.copy()
+ if 'body' in parameters:
+ parameters['body']['required'] = False
+
+ return accept, max_size, media_path_url
+
+
+def _fix_up_method_description(method_desc, root_desc, schema):
+ """Updates a method description in a discovery document.
+
+ SIDE EFFECTS: Changes the parameters dictionary in the method description with
+ extra parameters which are used locally.
+
+ Args:
+ method_desc: Dictionary with metadata describing an API method. Value comes
+ from the dictionary of methods stored in the 'methods' key in the
+ deserialized discovery document.
+ root_desc: Dictionary; the entire original deserialized discovery document.
+ schema: Object, mapping of schema names to schema descriptions.
+
+ Returns:
+ Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)
+ where:
+ - path_url is a String; the relative URL for the API method. Relative to
+ the API root, which is specified in the discovery document.
+ - http_method is a String; the HTTP method used to call the API method
+ described in the method description.
+ - method_id is a String; the name of the RPC method associated with the
+ API method, and is in the method description in the 'id' key.
+ - accept is a list of strings representing what content types are
+ accepted for media upload. Defaults to empty list if not in the
+ discovery document.
+ - max_size is a long representing the max size in bytes allowed for a
+ media upload. Defaults to 0L if not in the discovery document.
+ - media_path_url is a String; the absolute URI for media upload for the
+ API method. Constructed using the API root URI and service path from
+ the discovery document and the relative path for the API method. If
+ media upload is not supported, this is None.
+ """
+ path_url = method_desc['path']
+ http_method = method_desc['httpMethod']
+ method_id = method_desc['id']
+
+ parameters = _fix_up_parameters(method_desc, root_desc, http_method, schema)
+ # Order is important. `_fix_up_media_upload` needs `method_desc` to have a
+ # 'parameters' key and needs to know if there is a 'body' parameter because it
+ # also sets a 'media_body' parameter.
+ accept, max_size, media_path_url = _fix_up_media_upload(
+ method_desc, root_desc, path_url, parameters)
+
+ return path_url, http_method, method_id, accept, max_size, media_path_url
+
+
+def _urljoin(base, url):
+ """Custom urljoin replacement supporting : before / in url."""
+ # In general, it's unsafe to simply join base and url. However, for
+ # the case of discovery documents, we know:
+ # * base will never contain params, query, or fragment
+ # * url will never contain a scheme or net_loc.
+ # In general, this means we can safely join on /; we just need to
+ # ensure we end up with precisely one / joining base and url. The
+ # exception here is the case of media uploads, where url will be an
+ # absolute url.
+ if url.startswith('http://') or url.startswith('https://'):
+ return urljoin(base, url)
+ new_base = base if base.endswith('/') else base + '/'
+ new_url = url[1:] if url.startswith('/') else url
+ return new_base + new_url
+
+
+# TODO(dhermes): Convert this class to ResourceMethod and make it callable
+class ResourceMethodParameters(object):
+ """Represents the parameters associated with a method.
+
+ Attributes:
+ argmap: Map from method parameter name (string) to query parameter name
+ (string).
+ required_params: List of required parameters (represented by parameter
+ name as string).
+ repeated_params: List of repeated parameters (represented by parameter
+ name as string).
+ pattern_params: Map from method parameter name (string) to regular
+ expression (as a string). If the pattern is set for a parameter, the
+ value for that parameter must match the regular expression.
+ query_params: List of parameters (represented by parameter name as string)
+ that will be used in the query string.
+ path_params: Set of parameters (represented by parameter name as string)
+ that will be used in the base URL path.
+ param_types: Map from method parameter name (string) to parameter type. Type
+ can be any valid JSON schema type; valid values are 'any', 'array',
+ 'boolean', 'integer', 'number', 'object', or 'string'. Reference:
+ http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
+ enum_params: Map from method parameter name (string) to list of strings,
+ where each list of strings is the list of acceptable enum values.
+ """
+
+ def __init__(self, method_desc):
+ """Constructor for ResourceMethodParameters.
+
+ Sets default values and defers to set_parameters to populate.
+
+ Args:
+ method_desc: Dictionary with metadata describing an API method. Value
+ comes from the dictionary of methods stored in the 'methods' key in
+ the deserialized discovery document.
+ """
+ self.argmap = {}
+ self.required_params = []
+ self.repeated_params = []
+ self.pattern_params = {}
+ self.query_params = []
+ # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
+ # parsing is gotten rid of.
+ self.path_params = set()
+ self.param_types = {}
+ self.enum_params = {}
+
+ self.set_parameters(method_desc)
+
+ def set_parameters(self, method_desc):
+ """Populates maps and lists based on method description.
+
+ Iterates through each parameter for the method and parses the values from
+ the parameter dictionary.
+
+ Args:
+ method_desc: Dictionary with metadata describing an API method. Value
+ comes from the dictionary of methods stored in the 'methods' key in
+ the deserialized discovery document.
+ """
+ for arg, desc in six.iteritems(method_desc.get('parameters', {})):
+ param = key2param(arg)
+ self.argmap[param] = arg
+
+ if desc.get('pattern'):
+ self.pattern_params[param] = desc['pattern']
+ if desc.get('enum'):
+ self.enum_params[param] = desc['enum']
+ if desc.get('required'):
+ self.required_params.append(param)
+ if desc.get('repeated'):
+ self.repeated_params.append(param)
+ if desc.get('location') == 'query':
+ self.query_params.append(param)
+ if desc.get('location') == 'path':
+ self.path_params.add(param)
+ self.param_types[param] = desc.get('type', 'string')
+
+ # TODO(dhermes): Determine if this is still necessary. Discovery based APIs
+ # should have all path parameters already marked with
+ # 'location: path'.
+ for match in URITEMPLATE.finditer(method_desc['path']):
+ for namematch in VARNAME.finditer(match.group(0)):
+ name = key2param(namematch.group(0))
+ self.path_params.add(name)
+ if name in self.query_params:
+ self.query_params.remove(name)
+
+
+def createMethod(methodName, methodDesc, rootDesc, schema):
+ """Creates a method for attaching to a Resource.
+
+ Args:
+ methodName: string, name of the method to use.
+ methodDesc: object, fragment of deserialized discovery document that
+ describes the method.
+ rootDesc: object, the entire deserialized discovery document.
+ schema: object, mapping of schema names to schema descriptions.
+ """
+ methodName = fix_method_name(methodName)
+ (pathUrl, httpMethod, methodId, accept,
+ maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc, schema)
+
+ parameters = ResourceMethodParameters(methodDesc)
+
+ def method(self, **kwargs):
+ # Don't bother with a docstring; it will be overwritten by createMethod.
+
+ for name in six.iterkeys(kwargs):
+ if name not in parameters.argmap:
+ raise TypeError('Got an unexpected keyword argument "%s"' % name)
+
+ # Remove args that have a value of None.
+ keys = list(kwargs.keys())
+ for name in keys:
+ if kwargs[name] is None:
+ del kwargs[name]
+
+ for name in parameters.required_params:
+ if name not in kwargs:
+ # temporary workaround for non-paging methods incorrectly requiring
+ # page token parameter (cf. drive.changes.watch vs. drive.changes.list)
+ if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(
+ _methodProperties(methodDesc, schema, 'response')):
+ raise TypeError('Missing required parameter "%s"' % name)
+
+ for name, regex in six.iteritems(parameters.pattern_params):
+ if name in kwargs:
+ if isinstance(kwargs[name], six.string_types):
+ pvalues = [kwargs[name]]
+ else:
+ pvalues = kwargs[name]
+ for pvalue in pvalues:
+ if re.match(regex, pvalue) is None:
+ raise TypeError(
+ 'Parameter "%s" value "%s" does not match the pattern "%s"' %
+ (name, pvalue, regex))
+
+ for name, enums in six.iteritems(parameters.enum_params):
+ if name in kwargs:
+ # We need to handle the case of a repeated enum
+ # name differently, since we want to handle both
+ # arg='value' and arg=['value1', 'value2']
+ if (name in parameters.repeated_params and
+ not isinstance(kwargs[name], six.string_types)):
+ values = kwargs[name]
+ else:
+ values = [kwargs[name]]
+ for value in values:
+ if value not in enums:
+ raise TypeError(
+ 'Parameter "%s" value "%s" is not an allowed value in "%s"' %
+ (name, value, str(enums)))
+
+ actual_query_params = {}
+ actual_path_params = {}
+ for key, value in six.iteritems(kwargs):
+ to_type = parameters.param_types.get(key, 'string')
+ # For repeated parameters we cast each member of the list.
+ if key in parameters.repeated_params and isinstance(value, list):
+ cast_value = [_cast(x, to_type) for x in value]
+ else:
+ cast_value = _cast(value, to_type)
+ if key in parameters.query_params:
+ actual_query_params[parameters.argmap[key]] = cast_value
+ if key in parameters.path_params:
+ actual_path_params[parameters.argmap[key]] = cast_value
+ body_value = kwargs.get('body', None)
+ media_filename = kwargs.get('media_body', None)
+ media_mime_type = kwargs.get('media_mime_type', None)
+
+ if self._developerKey:
+ actual_query_params['key'] = self._developerKey
+
+ model = self._model
+ if methodName.endswith('_media'):
+ model = MediaModel()
+ elif 'response' not in methodDesc:
+ model = RawModel()
+
+ headers = {}
+ headers, params, query, body = model.request(headers,
+ actual_path_params, actual_query_params, body_value)
+
+ expanded_url = uritemplate.expand(pathUrl, params)
+ url = _urljoin(self._baseUrl, expanded_url + query)
+
+ resumable = None
+ multipart_boundary = ''
+
+ if media_filename:
+ # Ensure we end up with a valid MediaUpload object.
+ if isinstance(media_filename, six.string_types):
+ if media_mime_type is None:
+ logger.warning(
+ 'media_mime_type argument not specified: trying to auto-detect for %s',
+ media_filename)
+ media_mime_type, _ = mimetypes.guess_type(media_filename)
+ if media_mime_type is None:
+ raise UnknownFileType(media_filename)
+ if not mimeparse.best_match([media_mime_type], ','.join(accept)):
+ raise UnacceptableMimeTypeError(media_mime_type)
+ media_upload = MediaFileUpload(media_filename,
+ mimetype=media_mime_type)
+ elif isinstance(media_filename, MediaUpload):
+ media_upload = media_filename
+ else:
+ raise TypeError('media_filename must be str or MediaUpload.')
+
+ # Check the maxSize
+ if media_upload.size() is not None and media_upload.size() > maxSize > 0:
+ raise MediaUploadSizeError("Media larger than: %s" % maxSize)
+
+ # Use the media path uri for media uploads
+ expanded_url = uritemplate.expand(mediaPathUrl, params)
+ url = _urljoin(self._baseUrl, expanded_url + query)
+ if media_upload.resumable():
+ url = _add_query_parameter(url, 'uploadType', 'resumable')
+
+ if media_upload.resumable():
+ # This is all we need to do for resumable, if the body exists it gets
+ # sent in the first request, otherwise an empty body is sent.
+ resumable = media_upload
+ else:
+ # A non-resumable upload
+ if body is None:
+ # This is a simple media upload
+ headers['content-type'] = media_upload.mimetype()
+ body = media_upload.getbytes(0, media_upload.size())
+ url = _add_query_parameter(url, 'uploadType', 'media')
+ else:
+ # This is a multipart/related upload.
+ msgRoot = MIMEMultipart('related')
+ # msgRoot should not write out its own headers
+ setattr(msgRoot, '_write_headers', lambda self: None)
+
+ # attach the body as one part
+ msg = MIMENonMultipart(*headers['content-type'].split('/'))
+ msg.set_payload(body)
+ msgRoot.attach(msg)
+
+ # attach the media as the second part
+ msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
+ msg['Content-Transfer-Encoding'] = 'binary'
+
+ payload = media_upload.getbytes(0, media_upload.size())
+ msg.set_payload(payload)
+ msgRoot.attach(msg)
+ # encode the body: note that we can't use `as_string`, because
+ # it plays games with `From ` lines.
+ fp = BytesIO()
+ g = _BytesGenerator(fp, mangle_from_=False)
+ g.flatten(msgRoot, unixfrom=False)
+ body = fp.getvalue()
+
+ multipart_boundary = msgRoot.get_boundary()
+ headers['content-type'] = ('multipart/related; '
+ 'boundary="%s"') % multipart_boundary
+ url = _add_query_parameter(url, 'uploadType', 'multipart')
+
+ logger.info('URL being requested: %s %s' % (httpMethod, url))
+ return self._requestBuilder(self._http,
+ model.response,
+ url,
+ method=httpMethod,
+ body=body,
+ headers=headers,
+ methodId=methodId,
+ resumable=resumable)
+
+ docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
+ if len(parameters.argmap) > 0:
+ docs.append('Args:\n')
+
+ # Skip undocumented params and params common to all methods.
+ skip_parameters = list(rootDesc.get('parameters', {}).keys())
+ skip_parameters.extend(STACK_QUERY_PARAMETERS)
+
+ all_args = list(parameters.argmap.keys())
+ args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
+
+ # Move body to the front of the line.
+ if 'body' in all_args:
+ args_ordered.append('body')
+
+ for name in all_args:
+ if name not in args_ordered:
+ args_ordered.append(name)
+
+ for arg in args_ordered:
+ if arg in skip_parameters:
+ continue
+
+ repeated = ''
+ if arg in parameters.repeated_params:
+ repeated = ' (repeated)'
+ required = ''
+ if arg in parameters.required_params:
+ required = ' (required)'
+ paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
+ paramdoc = paramdesc.get('description', 'A parameter')
+ if '$ref' in paramdesc:
+ docs.append(
+ (' %s: object, %s%s%s\n The object takes the'
+ ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
+ schema.prettyPrintByName(paramdesc['$ref'])))
+ else:
+ paramtype = paramdesc.get('type', 'string')
+ docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
+ repeated))
+ enum = paramdesc.get('enum', [])
+ enumDesc = paramdesc.get('enumDescriptions', [])
+ if enum and enumDesc:
+ docs.append(' Allowed values\n')
+ for (name, desc) in zip(enum, enumDesc):
+ docs.append(' %s - %s\n' % (name, desc))
+ if 'response' in methodDesc:
+ if methodName.endswith('_media'):
+ docs.append('\nReturns:\n The media object as a string.\n\n ')
+ else:
+ docs.append('\nReturns:\n An object of the form:\n\n ')
+ docs.append(schema.prettyPrintSchema(methodDesc['response']))
+
+ setattr(method, '__doc__', ''.join(docs))
+ return (methodName, method)
+
+
+def createNextMethod(methodName,
+ pageTokenName='pageToken',
+ nextPageTokenName='nextPageToken',
+ isPageTokenParameter=True):
+ """Creates any _next methods for attaching to a Resource.
+
+ The _next methods allow for easy iteration through list() responses.
+
+ Args:
+ methodName: string, name of the method to use.
+ pageTokenName: string, name of request page token field.
+ nextPageTokenName: string, name of response page token field.
+ isPageTokenParameter: Boolean, True if request page token is a query
+ parameter, False if request page token is a field of the request body.
+ """
+ methodName = fix_method_name(methodName)
+
+ def methodNext(self, previous_request, previous_response):
+ """Retrieves the next page of results.
+
+Args:
+ previous_request: The request for the previous page. (required)
+ previous_response: The response from the request for the previous page. (required)
+
+Returns:
+ A request object that you can call 'execute()' on to request the next
+ page. Returns None if there are no more items in the collection.
+ """
+ # Retrieve nextPageToken from previous_response
+ # Use as pageToken in previous_request to create new request.
+
+ nextPageToken = previous_response.get(nextPageTokenName, None)
+ if not nextPageToken:
+ return None
+
+ request = copy.copy(previous_request)
+
+ if isPageTokenParameter:
+ # Replace pageToken value in URI
+ request.uri = _add_query_parameter(
+ request.uri, pageTokenName, nextPageToken)
+ logger.info('Next page request URL: %s %s' % (methodName, request.uri))
+ else:
+ # Replace pageToken value in request body
+ model = self._model
+ body = model.deserialize(request.body)
+ body[pageTokenName] = nextPageToken
+ request.body = model.serialize(body)
+ logger.info('Next page request body: %s %s' % (methodName, body))
+
+ return request
+
+ return (methodName, methodNext)
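+
+# Usage sketch: the generated *_next methods drive pagination. 'drive' is
+# an illustrative service object from build('drive', 'v3').
+#
+#   request = drive.files().list(pageSize=100)
+#   while request is not None:
+#     response = request.execute()
+#     for f in response.get('files', []):
+#       print(f['name'])
+#     request = drive.files().list_next(request, response)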
+
+
+class Resource(object):
+ """A class for interacting with a resource."""
+
+ def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
+ resourceDesc, rootDesc, schema):
+ """Build a Resource from the API description.
+
+ Args:
+ http: httplib2.Http, Object to make http requests with.
+ baseUrl: string, base URL for the API. All requests are relative to this
+ URI.
+ model: googleapiclient.Model, converts to and from the wire format.
+ requestBuilder: class or callable that instantiates an
+ googleapiclient.HttpRequest object.
+ developerKey: string, key obtained from
+ https://code.google.com/apis/console
+ resourceDesc: object, section of deserialized discovery document that
+ describes a resource. Note that the top level discovery document
+ is considered a resource.
+ rootDesc: object, the entire deserialized discovery document.
+ schema: object, mapping of schema names to schema descriptions.
+ """
+ self._dynamic_attrs = []
+
+ self._http = http
+ self._baseUrl = baseUrl
+ self._model = model
+ self._developerKey = developerKey
+ self._requestBuilder = requestBuilder
+ self._resourceDesc = resourceDesc
+ self._rootDesc = rootDesc
+ self._schema = schema
+
+ self._set_service_methods()
+
+ def _set_dynamic_attr(self, attr_name, value):
+ """Sets an instance attribute and tracks it in a list of dynamic attributes.
+
+ Args:
+ attr_name: string; The name of the attribute to be set
+ value: The value being set on the object and tracked in the dynamic cache.
+ """
+ self._dynamic_attrs.append(attr_name)
+ self.__dict__[attr_name] = value
+
+ def __getstate__(self):
+ """Trim the state down to something that can be pickled.
+
+ Uses the fact that the instance variable _dynamic_attrs holds attrs that
+ will be wiped and restored on pickle serialization.
+ """
+ state_dict = copy.copy(self.__dict__)
+ for dynamic_attr in self._dynamic_attrs:
+ del state_dict[dynamic_attr]
+ del state_dict['_dynamic_attrs']
+ return state_dict
+
+ def __setstate__(self, state):
+ """Reconstitute the state of the object from being pickled.
+
+ Uses the fact that the instance variable _dynamic_attrs holds attrs that
+ will be wiped and restored on pickle serialization.
+ """
+ self.__dict__.update(state)
+ self._dynamic_attrs = []
+ self._set_service_methods()
+
+ def _set_service_methods(self):
+ self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
+ self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
+ self._add_next_methods(self._resourceDesc, self._schema)
+
+ def _add_basic_methods(self, resourceDesc, rootDesc, schema):
+ # If this is the root Resource, add a new_batch_http_request() method.
+ if resourceDesc == rootDesc:
+ batch_uri = '%s%s' % (
+ rootDesc['rootUrl'], rootDesc.get('batchPath', 'batch'))
+ def new_batch_http_request(callback=None):
+ """Create a BatchHttpRequest object based on the discovery document.
+
+ Args:
+ callback: callable, A callback to be called for each response, of the
+ form callback(id, response, exception). The first parameter is the
+ request id, and the second is the deserialized response object. The
+ third is an apiclient.errors.HttpError exception object if an HTTP
+ error occurred while processing the request, or None if no error
+ occurred.
+
+ Returns:
+ A BatchHttpRequest object based on the discovery document.
+ """
+ return BatchHttpRequest(callback=callback, batch_uri=batch_uri)
+ self._set_dynamic_attr('new_batch_http_request', new_batch_http_request)
+
+ # Add basic methods to Resource
+ if 'methods' in resourceDesc:
+ for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
+ fixedMethodName, method = createMethod(
+ methodName, methodDesc, rootDesc, schema)
+ self._set_dynamic_attr(fixedMethodName,
+ method.__get__(self, self.__class__))
+ # Add in _media methods. The functionality of the attached method will
+ # change when it sees that the method name ends in _media.
+ if methodDesc.get('supportsMediaDownload', False):
+ fixedMethodName, method = createMethod(
+ methodName + '_media', methodDesc, rootDesc, schema)
+ self._set_dynamic_attr(fixedMethodName,
+ method.__get__(self, self.__class__))
+
+ def _add_nested_resources(self, resourceDesc, rootDesc, schema):
+ # Add in nested resources
+ if 'resources' in resourceDesc:
+
+ def createResourceMethod(methodName, methodDesc):
+ """Create a method on the Resource to access a nested Resource.
+
+ Args:
+ methodName: string, name of the method to use.
+ methodDesc: object, fragment of deserialized discovery document that
+ describes the method.
+ """
+ methodName = fix_method_name(methodName)
+
+ def methodResource(self):
+ return Resource(http=self._http, baseUrl=self._baseUrl,
+ model=self._model, developerKey=self._developerKey,
+ requestBuilder=self._requestBuilder,
+ resourceDesc=methodDesc, rootDesc=rootDesc,
+ schema=schema)
+
+ setattr(methodResource, '__doc__', 'A collection resource.')
+ setattr(methodResource, '__is_resource__', True)
+
+ return (methodName, methodResource)
+
+ for methodName, methodDesc in six.iteritems(resourceDesc['resources']):
+ fixedMethodName, method = createResourceMethod(methodName, methodDesc)
+ self._set_dynamic_attr(fixedMethodName,
+ method.__get__(self, self.__class__))
+
+ def _add_next_methods(self, resourceDesc, schema):
+ # Add _next() methods if and only if one of the names 'pageToken' or
+ # 'nextPageToken' occurs among the fields of both the method's response
+ # type and either the method's request (query parameters) or request body.
+ if 'methods' not in resourceDesc:
+ return
+ for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
+ nextPageTokenName = _findPageTokenName(
+ _methodProperties(methodDesc, schema, 'response'))
+ if not nextPageTokenName:
+ continue
+ isPageTokenParameter = True
+ pageTokenName = _findPageTokenName(methodDesc.get('parameters', {}))
+ if not pageTokenName:
+ isPageTokenParameter = False
+ pageTokenName = _findPageTokenName(
+ _methodProperties(methodDesc, schema, 'request'))
+ if not pageTokenName:
+ continue
+ fixedMethodName, method = createNextMethod(
+ methodName + '_next', pageTokenName, nextPageTokenName,
+ isPageTokenParameter)
+ self._set_dynamic_attr(fixedMethodName,
+ method.__get__(self, self.__class__))
+
+
+def _findPageTokenName(fields):
+ """Search field names for one like a page token.
+
+ Args:
+ fields: container of string, names of fields.
+
+ Returns:
+ First name that is either 'pageToken' or 'nextPageToken' if one exists,
+ otherwise None.
+ """
+ return next((tokenName for tokenName in _PAGE_TOKEN_NAMES
+ if tokenName in fields), None)
+
+def _methodProperties(methodDesc, schema, name):
+ """Get properties of a field in a method description.
+
+ Args:
+ methodDesc: object, fragment of deserialized discovery document that
+ describes the method.
+ schema: object, mapping of schema names to schema descriptions.
+ name: string, name of top-level field in method description.
+
+ Returns:
+ Object representing fragment of deserialized discovery document
+ corresponding to 'properties' field of object corresponding to named field
+ in method description, if it exists, otherwise empty dict.
+ """
+ desc = methodDesc.get(name, {})
+ if '$ref' in desc:
+ desc = schema.get(desc['$ref'], {})
+ return desc.get('properties', {})
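+
+# For example (an illustrative fragment, not from a real discovery document):
+# with methodDesc = {'response': {'$ref': 'FileList'}} and
+# schema = {'FileList': {'properties': {'nextPageToken': {'type': 'string'}}}},
+# _methodProperties(methodDesc, schema, 'response') returns
+# {'nextPageToken': {'type': 'string'}}.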
diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
new file mode 100644
index 0000000..f86a06d
--- /dev/null
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Caching utility for the discovery document."""
+
+from __future__ import absolute_import
+
+import logging
+import datetime
+
+
+LOGGER = logging.getLogger(__name__)
+
+DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24 # 1 day
+
+
+def autodetect():
+ """Detects an appropriate cache module and returns it.
+
+ Returns:
+ googleapiclient.discovery_cache.base.Cache, a cache object which
+ is auto detected, or None if no cache object is available.
+ """
+ try:
+ from google.appengine.api import memcache
+ from . import appengine_memcache
+ return appengine_memcache.cache
+ except Exception:
+ try:
+ from . import file_cache
+ return file_cache.cache
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+ return None
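+
+# Usage sketch (assuming no App Engine environment, so the file cache is
+# chosen; the result may be None if neither backend is importable):
+#   from googleapiclient.discovery_cache import autodetect
+#   cache = autodetect()
+#   if cache is not None:
+#       cache.set('https://example.invalid/discovery', '{}')
+#       doc = cache.get('https://example.invalid/discovery')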
diff --git a/googleapiclient/discovery_cache/appengine_memcache.py b/googleapiclient/discovery_cache/appengine_memcache.py
new file mode 100644
index 0000000..7e43e66
--- /dev/null
+++ b/googleapiclient/discovery_cache/appengine_memcache.py
@@ -0,0 +1,55 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""App Engine memcache based cache for the discovery document."""
+
+import logging
+
+# This is only an optional dependency because we only import this
+# module when google.appengine.api.memcache is available.
+from google.appengine.api import memcache
+
+from . import base
+from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
+
+
+LOGGER = logging.getLogger(__name__)
+
+NAMESPACE = 'google-api-client'
+
+
+class Cache(base.Cache):
+ """A cache with app engine memcache API."""
+
+ def __init__(self, max_age):
+ """Constructor.
+
+ Args:
+ max_age: Cache expiration in seconds.
+ """
+ self._max_age = max_age
+
+ def get(self, url):
+ try:
+ return memcache.get(url, namespace=NAMESPACE)
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+
+ def set(self, url, content):
+ try:
+ memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+
+cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/googleapiclient/discovery_cache/base.py b/googleapiclient/discovery_cache/base.py
new file mode 100644
index 0000000..00e466d
--- /dev/null
+++ b/googleapiclient/discovery_cache/base.py
@@ -0,0 +1,45 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An abstract class for caching the discovery document."""
+
+import abc
+
+
+class Cache(object):
+ """A base abstract cache class."""
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def get(self, url):
+ """Gets the content from the memcache with a given key.
+
+ Args:
+ url: string, the key for the cache.
+
+ Returns:
+ object, the value in the cache for the given key, or None if the key is
+ not in the cache.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set(self, url, content):
+ """Sets the given key and content in the cache.
+
+ Args:
+ url: string, the key for the cache.
+ content: string, the discovery document.
+ """
+ raise NotImplementedError()
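+
+# A minimal concrete subclass sketch (illustrative only, not part of the
+# library): an in-process dict satisfying the same get/set interface.
+#
+#   class DictCache(Cache):
+#       def __init__(self):
+#           self._store = {}
+#       def get(self, url):
+#           return self._store.get(url)
+#       def set(self, url, content):
+#           self._store[url] = content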
diff --git a/googleapiclient/discovery_cache/file_cache.py b/googleapiclient/discovery_cache/file_cache.py
new file mode 100644
index 0000000..48bddea
--- /dev/null
+++ b/googleapiclient/discovery_cache/file_cache.py
@@ -0,0 +1,141 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""File based cache for the discovery document.
+
+The cache is stored in a single file so that multiple processes can
+share the same cache. The file is locked on every access. If the cache
+content is corrupted, it is reinitialized with an empty cache.
+"""
+
+from __future__ import division
+
+import datetime
+import json
+import logging
+import os
+import tempfile
+import threading
+
+try:
+ from oauth2client.contrib.locked_file import LockedFile
+except ImportError:
+ # oauth2client < 2.0.0
+ try:
+ from oauth2client.locked_file import LockedFile
+ except ImportError:
+ # oauth2client > 4.0.0 or google-auth
+ raise ImportError(
+ 'file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth')
+
+from . import base
+from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
+
+LOGGER = logging.getLogger(__name__)
+
+FILENAME = 'google-api-python-client-discovery-doc.cache'
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+
+
+def _to_timestamp(date):
+ try:
+ return (date - EPOCH).total_seconds()
+ except AttributeError:
+ # The following is the equivalent of total_seconds() in Python2.6.
+ # See also: https://docs.python.org/2/library/datetime.html
+ delta = date - EPOCH
+ return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
+ * 10**6) / 10**6)
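+
+# e.g. _to_timestamp(datetime.datetime(1970, 1, 2)) == 86400.0
+# (one day after the epoch).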
+
+
+def _read_or_initialize_cache(f):
+ f.file_handle().seek(0)
+ try:
+ cache = json.load(f.file_handle())
+ except Exception:
+ # This means the file is being opened for the first time, or the cache is
+ # corrupted, so initialize the file with an empty dict.
+ cache = {}
+ f.file_handle().truncate(0)
+ f.file_handle().seek(0)
+ json.dump(cache, f.file_handle())
+ return cache
+
+
+class Cache(base.Cache):
+ """A file based cache for the discovery documents."""
+
+ def __init__(self, max_age):
+ """Constructor.
+
+ Args:
+ max_age: Cache expiration in seconds.
+ """
+ self._max_age = max_age
+ self._file = os.path.join(tempfile.gettempdir(), FILENAME)
+ f = LockedFile(self._file, 'a+', 'r')
+ try:
+ f.open_and_lock()
+ if f.is_locked():
+ _read_or_initialize_cache(f)
+ # If we can not obtain the lock, other process or thread must
+ # have initialized the file.
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+ finally:
+ f.unlock_and_close()
+
+ def get(self, url):
+ f = LockedFile(self._file, 'r+', 'r')
+ try:
+ f.open_and_lock()
+ if f.is_locked():
+ cache = _read_or_initialize_cache(f)
+ if url in cache:
+ content, t = cache.get(url, (None, 0))
+ if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
+ return content
+ return None
+ else:
+ LOGGER.debug('Could not obtain a lock for the cache file.')
+ return None
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+ finally:
+ f.unlock_and_close()
+
+ def set(self, url, content):
+ f = LockedFile(self._file, 'r+', 'r')
+ try:
+ f.open_and_lock()
+ if f.is_locked():
+ cache = _read_or_initialize_cache(f)
+ cache[url] = (content, _to_timestamp(datetime.datetime.now()))
+ # Remove stale cache.
+ for k, (_, timestamp) in list(cache.items()):
+ if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
+ del cache[k]
+ f.file_handle().truncate(0)
+ f.file_handle().seek(0)
+ json.dump(cache, f.file_handle())
+ else:
+ LOGGER.debug('Could not obtain a lock for the cache file.')
+ except Exception as e:
+ LOGGER.warning(e, exc_info=True)
+ finally:
+ f.unlock_and_close()
+
+
+cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py
new file mode 100644
index 0000000..8c4795c
--- /dev/null
+++ b/googleapiclient/errors.py
@@ -0,0 +1,157 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Errors for the library.
+
+All exceptions defined by the library
+should be defined in this file.
+"""
+from __future__ import absolute_import
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+
+import json
+
+from googleapiclient import _helpers as util
+
+
+class Error(Exception):
+ """Base error for this module."""
+ pass
+
+
+class HttpError(Error):
+ """HTTP data was invalid or unexpected."""
+
+ @util.positional(3)
+ def __init__(self, resp, content, uri=None):
+ self.resp = resp
+ if not isinstance(content, bytes):
+ raise TypeError("HTTP content should be bytes")
+ self.content = content
+ self.uri = uri
+ self.error_details = ''
+
+ def _get_reason(self):
+ """Calculate the reason for the error from the response content."""
+ reason = self.resp.reason
+ try:
+ data = json.loads(self.content.decode('utf-8'))
+ if isinstance(data, dict):
+ reason = data['error']['message']
+ if 'details' in data['error']:
+ self.error_details = data['error']['details']
+ elif isinstance(data, list) and len(data) > 0:
+ first_error = data[0]
+ reason = first_error['error']['message']
+ if 'details' in first_error['error']:
+ self.error_details = first_error['error']['details']
+ except (ValueError, KeyError, TypeError):
+ pass
+ if reason is None:
+ reason = ''
+ return reason
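+ # e.g. an illustrative payload b'{"error": {"message": "Not Found"}}'
+ # yields reason == 'Not Found'.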
+
+ def __repr__(self):
+ reason = self._get_reason()
+ if self.error_details:
+ return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % \
+ (self.resp.status, self.uri, reason.strip(), self.error_details)
+ elif self.uri:
+ return '<HttpError %s when requesting %s returned "%s">' % (
+ self.resp.status, self.uri, self._get_reason().strip())
+ else:
+ return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
+
+ __str__ = __repr__
+
+
+class InvalidJsonError(Error):
+ """The JSON returned could not be parsed."""
+ pass
+
+
+class UnknownFileType(Error):
+ """File type unknown or unexpected."""
+ pass
+
+
+class UnknownLinkType(Error):
+ """Link type unknown or unexpected."""
+ pass
+
+
+class UnknownApiNameOrVersion(Error):
+ """No API with that name and version exists."""
+ pass
+
+
+class UnacceptableMimeTypeError(Error):
+ """That is an unacceptable mimetype for this operation."""
+ pass
+
+
+class MediaUploadSizeError(Error):
+ """Media is larger than the method can accept."""
+ pass
+
+
+class ResumableUploadError(HttpError):
+ """Error occured during resumable upload."""
+ pass
+
+
+class InvalidChunkSizeError(Error):
+ """The given chunksize is not valid."""
+ pass
+
+class InvalidNotificationError(Error):
+ """The channel Notification is invalid."""
+ pass
+
+class BatchError(HttpError):
+ """Error occured during batch operations."""
+
+ @util.positional(2)
+ def __init__(self, reason, resp=None, content=None):
+ self.resp = resp
+ self.content = content
+ self.reason = reason
+
+ def __repr__(self):
+ if getattr(self.resp, 'status', None) is None:
+ return '<BatchError %s>' % (self.reason)
+ else:
+ return '<BatchError %s "%s">' % (self.resp.status, self.reason)
+
+ __str__ = __repr__
+
+
+class UnexpectedMethodError(Error):
+ """Exception raised by RequestMockBuilder on unexpected calls."""
+
+ @util.positional(1)
+ def __init__(self, methodId=None):
+ """Constructor for an UnexpectedMethodError."""
+ super(UnexpectedMethodError, self).__init__(
+ 'Received unexpected call %s' % methodId)
+
+
+class UnexpectedBodyError(Error):
+ """Exception raised by RequestMockBuilder on unexpected bodies."""
+
+ def __init__(self, expected, provided):
+ """Constructor for an UnexpectedMethodError."""
+ super(UnexpectedBodyError, self).__init__(
+ 'Expected: [%s] - Provided: [%s]' % (expected, provided))
diff --git a/googleapiclient/http.py b/googleapiclient/http.py
new file mode 100644
index 0000000..5caca19
--- /dev/null
+++ b/googleapiclient/http.py
@@ -0,0 +1,1787 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to encapsulate a single HTTP request.
+
+The classes implement a command pattern, with every
+object supporting an execute() method that does the
+actual HTTP request.
+"""
+from __future__ import absolute_import
+import six
+from six.moves import http_client
+from six.moves import range
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+
+from six import BytesIO, StringIO
+from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote
+
+import base64
+import copy
+import gzip
+import httplib2
+import json
+import logging
+import mimetypes
+import os
+import random
+import socket
+import sys
+import time
+import uuid
+
+# TODO(issue 221): Remove this conditional import.
+try:
+ import ssl
+except ImportError:
+ _ssl_SSLError = object()
+else:
+ _ssl_SSLError = ssl.SSLError
+
+from email.generator import Generator
+from email.mime.multipart import MIMEMultipart
+from email.mime.nonmultipart import MIMENonMultipart
+from email.parser import FeedParser
+
+from googleapiclient import _helpers as util
+
+from googleapiclient import _auth
+from googleapiclient.errors import BatchError
+from googleapiclient.errors import HttpError
+from googleapiclient.errors import InvalidChunkSizeError
+from googleapiclient.errors import ResumableUploadError
+from googleapiclient.errors import UnexpectedBodyError
+from googleapiclient.errors import UnexpectedMethodError
+from googleapiclient.model import JsonModel
+
+
+LOGGER = logging.getLogger(__name__)
+
+DEFAULT_CHUNK_SIZE = 100*1024*1024
+
+MAX_URI_LENGTH = 2048
+
+MAX_BATCH_LIMIT = 1000
+
+_TOO_MANY_REQUESTS = 429
+
+DEFAULT_HTTP_TIMEOUT_SEC = 60
+
+_LEGACY_BATCH_URI = 'https://www.googleapis.com/batch'
+
+
+def _should_retry_response(resp_status, content):
+ """Determines whether a response should be retried.
+
+ Args:
+ resp_status: The response status received.
+ content: The response content body.
+
+ Returns:
+ True if the response should be retried, otherwise False.
+ """
+ # Retry on 5xx errors.
+ if resp_status >= 500:
+ return True
+
+ # Retry on 429 errors.
+ if resp_status == _TOO_MANY_REQUESTS:
+ return True
+
+ # For 403 errors, we have to check for the `reason` in the response to
+ # determine if we should retry.
+ if resp_status == six.moves.http_client.FORBIDDEN:
+ # If there's no details about the 403 type, don't retry.
+ if not content:
+ return False
+
+ # Content is in JSON format.
+ try:
+ data = json.loads(content.decode('utf-8'))
+ if isinstance(data, dict):
+ reason = data['error']['errors'][0]['reason']
+ else:
+ reason = data[0]['error']['errors']['reason']
+ except (UnicodeDecodeError, ValueError, KeyError):
+ LOGGER.warning('Invalid JSON content from response: %s', content)
+ return False
+
+ LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
+
+ # Only retry on rate limit related failures.
+ if reason in ('userRateLimitExceeded', 'rateLimitExceeded', ):
+ return True
+
+ # Everything else is a success or non-retriable so break.
+ return False
+
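+# Illustrative results of the policy above:
+#   _should_retry_response(503, b'')  # True  (5xx)
+#   _should_retry_response(429, b'')  # True  (too many requests)
+#   _should_retry_response(404, b'')  # False (not retriable)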
+
+def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
+ **kwargs):
+ """Retries an HTTP request multiple times while handling errors.
+
+ If after all retries the request still fails, last error is either returned as
+ return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
+
+ Args:
+ http: Http object to be used to execute request.
+ num_retries: Maximum number of retries.
+ req_type: Type of the request (used for logging retries).
+ sleep, rand: Functions to sleep for random time between retries.
+ uri: URI to be requested.
+ method: HTTP method to be used.
+ args, kwargs: Additional arguments passed to http.request.
+
+ Returns:
+ resp, content - Response from the http request (may be HTTP 5xx).
+ """
+ resp = None
+ content = None
+ for retry_num in range(num_retries + 1):
+ if retry_num > 0:
+ # Sleep before retrying.
+ sleep_time = rand() * 2 ** retry_num
+ LOGGER.warning(
+ 'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
+ sleep_time, retry_num, num_retries, req_type, method, uri,
+ resp.status if resp else exception)
+ sleep(sleep_time)
+
+ try:
+ exception = None
+ resp, content = http.request(uri, method, *args, **kwargs)
+ # Retry on SSL errors and socket timeout errors.
+ except _ssl_SSLError as ssl_error:
+ exception = ssl_error
+ except socket.timeout as socket_timeout:
+ # It's important that this be caught before socket.error, since
+ # socket.timeout is a subclass of socket.error and has no errorcode.
+ exception = socket_timeout
+ except socket.error as socket_error:
+ # errno's contents differ by platform, so we have to match by name.
+ if socket.errno.errorcode.get(socket_error.errno) not in {
+ 'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED'}:
+ raise
+ exception = socket_error
+ except httplib2.ServerNotFoundError as server_not_found_error:
+ exception = server_not_found_error
+
+ if exception:
+ if retry_num == num_retries:
+ raise exception
+ else:
+ continue
+
+ if not _should_retry_response(resp.status, content):
+ break
+
+ return resp, content
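+
+# Backoff sketch: with rand() drawn from [0, 1), the sleep before retry n is
+# rand() * 2**n seconds, i.e. bounded by 2, 4, 8, ... seconds for n = 1, 2, 3.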
+
+
+class MediaUploadProgress(object):
+ """Status of a resumable upload."""
+
+ def __init__(self, resumable_progress, total_size):
+ """Constructor.
+
+ Args:
+ resumable_progress: int, bytes sent so far.
+ total_size: int, total bytes in complete upload, or None if the total
+ upload size isn't known ahead of time.
+ """
+ self.resumable_progress = resumable_progress
+ self.total_size = total_size
+
+ def progress(self):
+ """Percent of upload completed, as a float.
+
+ Returns:
+ the percentage complete as a float, returning 0.0 if the total size of
+ the upload is unknown.
+ """
+ if self.total_size is not None and self.total_size != 0:
+ return float(self.resumable_progress) / float(self.total_size)
+ else:
+ return 0.0
+
+
+class MediaDownloadProgress(object):
+ """Status of a resumable download."""
+
+ def __init__(self, resumable_progress, total_size):
+ """Constructor.
+
+ Args:
+ resumable_progress: int, bytes received so far.
+ total_size: int, total bytes in complete download.
+ """
+ self.resumable_progress = resumable_progress
+ self.total_size = total_size
+
+ def progress(self):
+ """Percent of download completed, as a float.
+
+ Returns:
+ the percentage complete as a float, returning 0.0 if the total size of
+ the download is unknown.
+ """
+ if self.total_size is not None and self.total_size != 0:
+ return float(self.resumable_progress) / float(self.total_size)
+ else:
+ return 0.0
+
+
+class MediaUpload(object):
+ """Describes a media object to upload.
+
+ Base class that defines the interface of MediaUpload subclasses.
+
+ Note that subclasses of MediaUpload may allow you to control the chunksize
+ when uploading a media object. It is important to keep the size of the chunk
+ as large as possible to keep the upload efficient. Other factors may influence
+ the size of the chunk you use, particularly if you are working in an
+ environment where individual HTTP requests may have a hardcoded time limit,
+ such as under certain classes of requests under Google App Engine.
+
+ Streams are io.Base compatible objects that support seek(). Some MediaUpload
+ subclasses support using streams directly to upload data. Support for
+ streaming may be indicated by a MediaUpload sub-class and if appropriate for a
+ platform that stream will be used for uploading the media object. The support
+ for streaming is indicated by has_stream() returning True. The stream() method
+ should return an io.Base object that supports seek(). On platforms where the
+ underlying httplib module supports streaming, for example Python 2.6 and
+ later, the stream will be passed into the http library which will result in
+ less memory being used and possibly faster uploads.
+
+ If you need to upload media that can't be uploaded using any of the existing
+ MediaUpload sub-class then you can sub-class MediaUpload for your particular
+ needs.
+ """
+
+ def chunksize(self):
+ """Chunk size for resumable uploads.
+
+ Returns:
+ Chunk size in bytes.
+ """
+ raise NotImplementedError()
+
+ def mimetype(self):
+ """Mime type of the body.
+
+ Returns:
+ Mime type.
+ """
+ return 'application/octet-stream'
+
+ def size(self):
+ """Size of upload.
+
+ Returns:
+ Size of the body, or None if the size is unknown.
+ """
+ return None
+
+ def resumable(self):
+ """Whether this upload is resumable.
+
+ Returns:
+ True if resumable upload or False.
+ """
+ return False
+
+ def getbytes(self, begin, length):
+ """Get bytes from the media.
+
+ Args:
+ begin: int, offset from beginning of file.
+ length: int, number of bytes to read, starting at begin.
+
+ Returns:
+ A string of bytes read. May be shorter than length if EOF was reached
+ first.
+ """
+ raise NotImplementedError()
+
+ def has_stream(self):
+ """Does the underlying upload support a streaming interface.
+
+ Streaming means it is an io.IOBase subclass that supports seek, i.e.
+ seekable() returns True.
+
+ Returns:
+ True if the call to stream() will return an instance of a seekable io.Base
+ subclass.
+ """
+ return False
+
+ def stream(self):
+ """A stream interface to the data being uploaded.
+
+ Returns:
+ The returned value is an io.IOBase subclass that supports seek, i.e.
+ seekable() returns True.
+ """
+ raise NotImplementedError()
+
+ @util.positional(1)
+ def _to_json(self, strip=None):
+ """Utility function for creating a JSON representation of a MediaUpload.
+
+ Args:
+ strip: array, An array of names of members to not include in the JSON.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ t = type(self)
+ d = copy.copy(self.__dict__)
+ if strip is not None:
+ for member in strip:
+ del d[member]
+ d['_class'] = t.__name__
+ d['_module'] = t.__module__
+ return json.dumps(d)
+
+ def to_json(self):
+ """Create a JSON representation of an instance of MediaUpload.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ return self._to_json()
+
+ @classmethod
+ def new_from_json(cls, s):
+ """Utility class method to instantiate a MediaUpload subclass from a JSON
+ representation produced by to_json().
+
+ Args:
+ s: string, JSON from to_json().
+
+ Returns:
+ An instance of the subclass of MediaUpload that was serialized with
+ to_json().
+ """
+ data = json.loads(s)
+ # Find and call the right classmethod from_json() to restore the object.
+ module = data['_module']
+ m = __import__(module, fromlist=module.split('.')[:-1])
+ kls = getattr(m, data['_class'])
+ from_json = getattr(kls, 'from_json')
+ return from_json(s)
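+ # Round-trip sketch (assuming the serialized class defines from_json() and
+ # that the illustrative file 'cow.png' from the docstrings exists on disk):
+ #   upload = MediaFileUpload('cow.png', mimetype='image/png')
+ #   restored = MediaUpload.new_from_json(upload.to_json())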
+
+
+class MediaIoBaseUpload(MediaUpload):
+ """A MediaUpload for a io.Base objects.
+
+ Note that the Python file object is compatible with io.Base and can be used
+ with this class also.
+
+ fh = BytesIO(b'...Some data to upload...')
+ media = MediaIoBaseUpload(fh, mimetype='image/png',
+ chunksize=1024*1024, resumable=True)
+ farm.animals().insert(
+ id='cow',
+ name='cow.png',
+ media_body=media).execute()
+
+ Depending on the platform you are working on, you may pass -1 as the
+ chunksize, which indicates that the entire file should be uploaded in a single
+ request. If the underlying platform supports streams, such as Python 2.6 or
+ later, then this can be very efficient as it avoids multiple connections, and
+ also avoids loading the entire file into memory before sending it. Note that
+ Google App Engine has a 5MB limit on request size, so you should never set
+ your chunksize larger than 5MB, or to -1.
+ """
+
+ @util.positional(3)
+ def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
+ resumable=False):
+ """Constructor.
+
+ Args:
+ fd: io.Base or file object, The source of the bytes to upload. MUST be
+ opened in blocking mode, do not use streams opened in non-blocking mode.
+ The given stream must be seekable, that is, it must be able to call
+ seek() on fd.
+ mimetype: string, Mime-type of the file.
+ chunksize: int, File will be uploaded in chunks of this many bytes. Only
+ used if resumable=True. Pass in a value of -1 if the file is to be
+ uploaded as a single chunk. Note that Google App Engine has a 5MB limit
+ on request size, so you should never set your chunksize larger than 5MB,
+ or to -1.
+ resumable: bool, True if this is a resumable upload. False means upload
+ in a single request.
+ """
+ super(MediaIoBaseUpload, self).__init__()
+ self._fd = fd
+ self._mimetype = mimetype
+ if not (chunksize == -1 or chunksize > 0):
+ raise InvalidChunkSizeError()
+ self._chunksize = chunksize
+ self._resumable = resumable
+
+ self._fd.seek(0, os.SEEK_END)
+ self._size = self._fd.tell()
+
+ def chunksize(self):
+ """Chunk size for resumable uploads.
+
+ Returns:
+ Chunk size in bytes.
+ """
+ return self._chunksize
+
+ def mimetype(self):
+ """Mime type of the body.
+
+ Returns:
+ Mime type.
+ """
+ return self._mimetype
+
+ def size(self):
+ """Size of upload.
+
+ Returns:
+ Size of the body, or None if the size is unknown.
+ """
+ return self._size
+
+ def resumable(self):
+ """Whether this upload is resumable.
+
+ Returns:
+ True if resumable upload or False.
+ """
+ return self._resumable
+
+ def getbytes(self, begin, length):
+ """Get bytes from the media.
+
+ Args:
+ begin: int, offset from beginning of file.
+ length: int, number of bytes to read, starting at begin.
+
+ Returns:
+ A string of bytes read. May be shorter than length if EOF was reached
+ first.
+ """
+ self._fd.seek(begin)
+ return self._fd.read(length)
+
+ def has_stream(self):
+ """Does the underlying upload support a streaming interface.
+
+ Streaming means it is an io.IOBase subclass that supports seek, i.e.
+ seekable() returns True.
+
+ Returns:
+ True if the call to stream() will return an instance of a seekable io.Base
+ subclass.
+ """
+ return True
+
+ def stream(self):
+ """A stream interface to the data being uploaded.
+
+ Returns:
+ The returned value is an io.IOBase subclass that supports seek, i.e.
+ seekable() returns True.
+ """
+ return self._fd
+
+ def to_json(self):
+ """This upload type is not serializable."""
+ raise NotImplementedError('MediaIoBaseUpload is not serializable.')
+
+
+class MediaFileUpload(MediaIoBaseUpload):
+ """A MediaUpload for a file.
+
+ Construct a MediaFileUpload and pass as the media_body parameter of the
+ method. For example, if we had a service that allowed uploading images:
+
+ media = MediaFileUpload('cow.png', mimetype='image/png',
+ chunksize=1024*1024, resumable=True)
+ farm.animals().insert(
+ id='cow',
+ name='cow.png',
+ media_body=media).execute()
+
+ Depending on the platform you are working on, you may pass -1 as the
+ chunksize, which indicates that the entire file should be uploaded in a single
+ request. If the underlying platform supports streams, such as Python 2.6 or
+ later, then this can be very efficient as it avoids multiple connections, and
+ also avoids loading the entire file into memory before sending it. Note that
+ Google App Engine has a 5MB limit on request size, so you should never set
+ your chunksize larger than 5MB, or to -1.
+ """
+
+ @util.positional(2)
+ def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
+ resumable=False):
+ """Constructor.
+
+ Args:
+ filename: string, Name of the file.
+ mimetype: string, Mime-type of the file. If None then a mime-type will be
+ guessed from the file extension.
+ chunksize: int, File will be uploaded in chunks of this many bytes. Only
+ used if resumable=True. Pass in a value of -1 if the file is to be
+ uploaded in a single chunk. Note that Google App Engine has a 5MB limit
+ on request size, so you should never set your chunksize larger than 5MB,
+ or to -1.
+ resumable: bool, True if this is a resumable upload. False means upload
+ in a single request.
+ """
+ self._filename = filename
+ fd = open(self._filename, 'rb')
+ if mimetype is None:
+ # No mimetype provided, make a guess.
+ mimetype, _ = mimetypes.guess_type(filename)
+ if mimetype is None:
+ # Guess failed, use octet-stream.
+ mimetype = 'application/octet-stream'
+ super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
+ resumable=resumable)
+
+ def to_json(self):
+ """Creating a JSON representation of an instance of MediaFileUpload.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ return self._to_json(strip=['_fd'])
+
+ @staticmethod
+ def from_json(s):
+ d = json.loads(s)
+ return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
+ chunksize=d['_chunksize'], resumable=d['_resumable'])
+
+
+class MediaInMemoryUpload(MediaIoBaseUpload):
+ """MediaUpload for a chunk of bytes.
+
+ DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+ the stream.
+ """
+
+ @util.positional(2)
+ def __init__(self, body, mimetype='application/octet-stream',
+ chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
+ """Create a new MediaInMemoryUpload.
+
+ DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+ the stream.
+
+ Args:
+ body: string, Bytes of body content.
+ mimetype: string, Mime-type of the file or default of
+ 'application/octet-stream'.
+ chunksize: int, File will be uploaded in chunks of this many bytes. Only
+ used if resumable=True.
+ resumable: bool, True if this is a resumable upload. False means upload
+ in a single request.
+ """
+ fd = BytesIO(body)
+ super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
+ resumable=resumable)
+
+
+class MediaIoBaseDownload(object):
+ """"Download media resources.
+
+ Note that the Python file object is compatible with io.Base and can be used
+ with this class also.
+
+
+ Example:
+ request = farms.animals().get_media(id='cow')
+ fh = io.FileIO('cow.png', mode='wb')
+ downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
+
+ done = False
+ while done is False:
+ status, done = downloader.next_chunk()
+ if status:
+ print "Download %d%%." % int(status.progress() * 100)
+ print "Download Complete!"
+ """
+
+ @util.positional(3)
+ def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
+ """Constructor.
+
+ Args:
+ fd: io.Base or file object, The stream in which to write the downloaded
+ bytes.
+ request: googleapiclient.http.HttpRequest, the media request to perform in
+ chunks.
+ chunksize: int, File will be downloaded in chunks of this many bytes.
+ """
+ self._fd = fd
+ self._request = request
+ self._uri = request.uri
+ self._chunksize = chunksize
+ self._progress = 0
+ self._total_size = None
+ self._done = False
+
+ # Stubs for testing.
+ self._sleep = time.sleep
+ self._rand = random.random
+
+ self._headers = {}
+ for k, v in six.iteritems(request.headers):
+ # allow users to supply custom headers by setting them on the request
+ # but strip out the ones that are set by default on requests generated by
+ # API methods like Drive's files().get(fileId=...)
+ if not k.lower() in ('accept', 'accept-encoding', 'user-agent'):
+ self._headers[k] = v
+
+ @util.positional(1)
+ def next_chunk(self, num_retries=0):
+ """Get the next chunk of the download.
+
+ Args:
+ num_retries: Integer, number of times to retry with randomized
+ exponential backoff. If all retries fail, the raised HttpError
+ represents the last request. If zero (default), we attempt the
+ request only once.
+
+ Returns:
+ (status, done): (MediaDownloadProgress, boolean)
+ The value of 'done' will be True when the media has been fully
+ downloaded or the total size of the media is unknown.
+
+ Raises:
+ googleapiclient.errors.HttpError if the response was not a 2xx.
+ httplib2.HttpLib2Error if a transport error has occurred.
+ """
+ headers = self._headers.copy()
+ headers['range'] = 'bytes=%d-%d' % (
+ self._progress, self._progress + self._chunksize)
+ http = self._request.http
+
+ resp, content = _retry_request(
+ http, num_retries, 'media download', self._sleep, self._rand, self._uri,
+ 'GET', headers=headers)
+
+ if resp.status in [200, 206]:
+ if 'content-location' in resp and resp['content-location'] != self._uri:
+ self._uri = resp['content-location']
+ self._progress += len(content)
+ self._fd.write(content)
+
+ if 'content-range' in resp:
+ content_range = resp['content-range']
+ length = content_range.rsplit('/', 1)[1]
+ self._total_size = int(length)
+ elif 'content-length' in resp:
+ self._total_size = int(resp['content-length'])
+
+ if self._total_size is None or self._progress == self._total_size:
+ self._done = True
+ return MediaDownloadProgress(self._progress, self._total_size), self._done
+ else:
+ raise HttpError(resp, content, uri=self._uri)
+
+
+class _StreamSlice(object):
+ """Truncated stream.
+
+ Takes a stream and presents a stream that is a slice of the original stream.
+ This is used when uploading media in chunks. In later versions of Python a
+ stream can be passed to httplib in place of the string of data to send. The
+ problem is that httplib just blindly reads to the end of the stream. This
+ wrapper presents a virtual stream that only reads to the end of the chunk.
+ """
+
+ def __init__(self, stream, begin, chunksize):
+ """Constructor.
+
+ Args:
+ stream: (io.Base, file object), the stream to wrap.
+ begin: int, the seek position the chunk begins at.
+ chunksize: int, the size of the chunk.
+ """
+ self._stream = stream
+ self._begin = begin
+ self._chunksize = chunksize
+ self._stream.seek(begin)
+
+ def read(self, n=-1):
+ """Read n bytes.
+
+ Args:
+ n, int, the number of bytes to read.
+
+ Returns:
+ A string of length 'n', or less if EOF is reached.
+ """
+ # The data left available to read sits in [cur, end)
+ cur = self._stream.tell()
+ end = self._begin + self._chunksize
+ if n == -1 or cur + n > end:
+ n = end - cur
+ return self._stream.read(n)
+
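+# Behavior sketch with illustrative values:
+#   s = _StreamSlice(BytesIO(b'0123456789'), 4, 4)
+#   s.read()   # -> b'4567'; reads stop at the chunk boundary, not at EOF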
+
+class HttpRequest(object):
+ """Encapsulates a single HTTP request."""
+
+ @util.positional(4)
+ def __init__(self, http, postproc, uri,
+ method='GET',
+ body=None,
+ headers=None,
+ methodId=None,
+ resumable=None):
+ """Constructor for an HttpRequest.
+
+ Args:
+ http: httplib2.Http, the transport object to use to make a request
+ postproc: callable, called on the HTTP response and content to transform
+ it into a data object before returning, or raising an exception
+ on an error.
+ uri: string, the absolute URI to send the request to
+ method: string, the HTTP method to use
+ body: string, the request body of the HTTP request,
+ headers: dict, the HTTP request headers
+ methodId: string, a unique identifier for the API method being called.
+ resumable: MediaUpload, None if this is not a resumable request.
+ """
+ self.uri = uri
+ self.method = method
+ self.body = body
+ self.headers = headers or {}
+ self.methodId = methodId
+ self.http = http
+ self.postproc = postproc
+ self.resumable = resumable
+ self.response_callbacks = []
+ self._in_error_state = False
+
+ # The size of the non-media part of the request.
+ self.body_size = len(self.body or '')
+
+ # The resumable URI to send chunks to.
+ self.resumable_uri = None
+
+ # The bytes that have been uploaded.
+ self.resumable_progress = 0
+
+ # Stubs for testing.
+ self._rand = random.random
+ self._sleep = time.sleep
+
+ @util.positional(1)
+ def execute(self, http=None, num_retries=0):
+ """Execute the request.
+
+ Args:
+ http: httplib2.Http, an http object to be used in place of the
+ one the HttpRequest request object was constructed with.
+ num_retries: Integer, number of times to retry with randomized
+ exponential backoff. If all retries fail, the raised HttpError
+ represents the last request. If zero (default), we attempt the
+ request only once.
+
+ Returns:
+ A deserialized object model of the response body as determined
+ by the postproc.
+
+ Raises:
+ googleapiclient.errors.HttpError if the response was not a 2xx.
+ httplib2.HttpLib2Error if a transport error has occurred.
+ """
+ if http is None:
+ http = self.http
+
+ if self.resumable:
+ body = None
+ while body is None:
+ _, body = self.next_chunk(http=http, num_retries=num_retries)
+ return body
+
+ # Non-resumable case.
+
+ if 'content-length' not in self.headers:
+ self.headers['content-length'] = str(self.body_size)
+ # If the request URI is too long then turn it into a POST request.
+ # Assume that a GET request never contains a request body.
+ if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
+ self.method = 'POST'
+ self.headers['x-http-method-override'] = 'GET'
+ self.headers['content-type'] = 'application/x-www-form-urlencoded'
+ parsed = urlparse(self.uri)
+ self.uri = urlunparse(
+ (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
+ None)
+ )
+ self.body = parsed.query
+ self.headers['content-length'] = str(len(self.body))
+
+ # Handle retries for server-side errors.
+ resp, content = _retry_request(
+ http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
+ method=str(self.method), body=self.body, headers=self.headers)
+
+ for callback in self.response_callbacks:
+ callback(resp)
+ if resp.status >= 300:
+ raise HttpError(resp, content, uri=self.uri)
+ return self.postproc(resp, content)
+
+ @util.positional(2)
+ def add_response_callback(self, cb):
+ """add_response_headers_callback
+
+ Args:
+ cb: Callback to be called on receiving the response headers, of signature:
+
+ def cb(resp):
+ # Where resp is an instance of httplib2.Response
+ """
+ self.response_callbacks.append(cb)
+
+ @util.positional(1)
+ def next_chunk(self, http=None, num_retries=0):
+ """Execute the next step of a resumable upload.
+
+ Can only be used if the method being executed supports media uploads and
+ the MediaUpload object passed in was flagged as using resumable upload.
+
+ Example:
+
+ media = MediaFileUpload('cow.png', mimetype='image/png',
+ chunksize=1000, resumable=True)
+ request = farm.animals().insert(
+ id='cow',
+ name='cow.png',
+ media_body=media)
+
+ response = None
+ while response is None:
+ status, response = request.next_chunk()
+ if status:
+ print "Upload %d%% complete." % int(status.progress() * 100)
+
+
+ Args:
+ http: httplib2.Http, an http object to be used in place of the
+ one the HttpRequest request object was constructed with.
+ num_retries: Integer, number of times to retry with randomized
+ exponential backoff. If all retries fail, the raised HttpError
+ represents the last request. If zero (default), we attempt the
+ request only once.
+
+ Returns:
+ (status, body): (ResumableMediaStatus, object)
+ The body will be None until the resumable media is fully uploaded.
+
+ Raises:
+ googleapiclient.errors.HttpError if the response was not a 2xx.
+ httplib2.HttpLib2Error if a transport error has occurred.
+ """
+ if http is None:
+ http = self.http
+
+ if self.resumable.size() is None:
+ size = '*'
+ else:
+ size = str(self.resumable.size())
+
+ if self.resumable_uri is None:
+ start_headers = copy.copy(self.headers)
+ start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
+ if size != '*':
+ start_headers['X-Upload-Content-Length'] = size
+ start_headers['content-length'] = str(self.body_size)
+
+ resp, content = _retry_request(
+ http, num_retries, 'resumable URI request', self._sleep, self._rand,
+ self.uri, method=self.method, body=self.body, headers=start_headers)
+
+ if resp.status == 200 and 'location' in resp:
+ self.resumable_uri = resp['location']
+ else:
+ raise ResumableUploadError(resp, content)
+ elif self._in_error_state:
+ # If we are in an error state then query the server for current state of
+ # the upload by sending an empty PUT and reading the 'range' header in
+ # the response.
+ headers = {
+ 'Content-Range': 'bytes */%s' % size,
+ 'content-length': '0'
+ }
+ resp, content = http.request(self.resumable_uri, 'PUT',
+ headers=headers)
+ status, body = self._process_response(resp, content)
+ if body:
+ # The upload was complete.
+ return (status, body)
+
+ if self.resumable.has_stream():
+ data = self.resumable.stream()
+ if self.resumable.chunksize() == -1:
+ data.seek(self.resumable_progress)
+ chunk_end = self.resumable.size() - self.resumable_progress - 1
+ else:
+ # Doing chunking with a stream, so wrap a slice of the stream.
+ data = _StreamSlice(data, self.resumable_progress,
+ self.resumable.chunksize())
+ chunk_end = min(
+ self.resumable_progress + self.resumable.chunksize() - 1,
+ self.resumable.size() - 1)
+ else:
+ data = self.resumable.getbytes(
+ self.resumable_progress, self.resumable.chunksize())
+
+ # A short read implies that we are at EOF, so finish the upload.
+ if len(data) < self.resumable.chunksize():
+ size = str(self.resumable_progress + len(data))
+
+ chunk_end = self.resumable_progress + len(data) - 1
+
+ headers = {
+ 'Content-Range': 'bytes %d-%d/%s' % (
+ self.resumable_progress, chunk_end, size),
+ # Must set the content-length header here because httplib can't
+ # calculate the size when working with _StreamSlice.
+ 'Content-Length': str(chunk_end - self.resumable_progress + 1)
+ }
+
+ for retry_num in range(num_retries + 1):
+ if retry_num > 0:
+ self._sleep(self._rand() * 2**retry_num)
+ LOGGER.warning(
+ 'Retry #%d for media upload: %s %s, following status: %d'
+ % (retry_num, self.method, self.uri, resp.status))
+
+ try:
+ resp, content = http.request(self.resumable_uri, method='PUT',
+ body=data,
+ headers=headers)
+ except:
+ self._in_error_state = True
+ raise
+ if not _should_retry_response(resp.status, content):
+ break
+
+ return self._process_response(resp, content)
+
+ def _process_response(self, resp, content):
+ """Process the response from a single chunk upload.
+
+ Args:
+ resp: httplib2.Response, the response object.
+ content: string, the content of the response.
+
+ Returns:
+ (status, body): (ResumableMediaStatus, object)
+ The body will be None until the resumable media is fully uploaded.
+
+ Raises:
+ googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
+ """
+ if resp.status in [200, 201]:
+ self._in_error_state = False
+ return None, self.postproc(resp, content)
+ elif resp.status == 308:
+ self._in_error_state = False
+ # A "308 Resume Incomplete" indicates we are not done.
+ try:
+ self.resumable_progress = int(resp['range'].split('-')[1]) + 1
+ except KeyError:
+ # If resp doesn't contain range header, resumable progress is 0
+ self.resumable_progress = 0
+ if 'location' in resp:
+ self.resumable_uri = resp['location']
+ else:
+ self._in_error_state = True
+ raise HttpError(resp, content, uri=self.uri)
+
+ return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
+ None)
+
+ def to_json(self):
+ """Returns a JSON representation of the HttpRequest."""
+ d = copy.copy(self.__dict__)
+ if d['resumable'] is not None:
+ d['resumable'] = self.resumable.to_json()
+ del d['http']
+ del d['postproc']
+ del d['_sleep']
+ del d['_rand']
+
+ return json.dumps(d)
+
+ @staticmethod
+ def from_json(s, http, postproc):
+ """Returns an HttpRequest populated with info from a JSON object."""
+ d = json.loads(s)
+ if d['resumable'] is not None:
+ d['resumable'] = MediaUpload.new_from_json(d['resumable'])
+ return HttpRequest(
+ http,
+ postproc,
+ uri=d['uri'],
+ method=d['method'],
+ body=d['body'],
+ headers=d['headers'],
+ methodId=d['methodId'],
+ resumable=d['resumable'])
+
+
+class BatchHttpRequest(object):
+ """Batches multiple HttpRequest objects into a single HTTP request.
+
+ Example:
+ from googleapiclient.http import BatchHttpRequest
+
+ def list_animals(request_id, response, exception):
+ \"\"\"Do something with the animals list response.\"\"\"
+ if exception is not None:
+ # Do something with the exception.
+ pass
+ else:
+ # Do something with the response.
+ pass
+
+ def list_farmers(request_id, response, exception):
+ \"\"\"Do something with the farmers list response.\"\"\"
+ if exception is not None:
+ # Do something with the exception.
+ pass
+ else:
+ # Do something with the response.
+ pass
+
+ service = build('farm', 'v2')
+
+ batch = BatchHttpRequest()
+
+ batch.add(service.animals().list(), list_animals)
+ batch.add(service.farmers().list(), list_farmers)
+ batch.execute(http=http)
+ """
+
+ @util.positional(1)
+ def __init__(self, callback=None, batch_uri=None):
+ """Constructor for a BatchHttpRequest.
+
+ Args:
+ callback: callable, A callback to be called for each response, of the
+ form callback(id, response, exception). The first parameter is the
+ request id, and the second is the deserialized response object. The
+ third is an googleapiclient.errors.HttpError exception object if an HTTP error
+ occurred while processing the request, or None if no error occurred.
+ batch_uri: string, URI to send batch requests to.
+ """
+ if batch_uri is None:
+ batch_uri = _LEGACY_BATCH_URI
+
+ if batch_uri == _LEGACY_BATCH_URI:
+ LOGGER.warning(
+ "You have constructed a BatchHttpRequest using the legacy batch "
+ "endpoint %s. This endpoint will be turned down on March 25, 2019. "
+ "Please provide the API-specific endpoint or use "
+ "service.new_batch_http_request(). For more details see "
+ "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html"
+ "and https://developers.google.com/api-client-library/python/guide/batch.",
+ _LEGACY_BATCH_URI)
+ self._batch_uri = batch_uri
+
+ # Global callback to be called for each individual response in the batch.
+ self._callback = callback
+
+ # A map from id to request.
+ self._requests = {}
+
+ # A map from id to callback.
+ self._callbacks = {}
+
+ # List of request ids, in the order in which they were added.
+ self._order = []
+
+ # The last auto generated id.
+ self._last_auto_id = 0
+
+ # Unique ID on which to base the Content-ID headers.
+ self._base_id = None
+
+ # A map from request id to (httplib2.Response, content) response pairs
+ self._responses = {}
+
+ # A map of id(Credentials) that have been refreshed.
+ self._refreshed_credentials = {}
+
+ def _refresh_and_apply_credentials(self, request, http):
+ """Refresh the credentials and apply to the request.
+
+ Args:
+ request: HttpRequest, the request.
+ http: httplib2.Http, the global http object for the batch.
+ """
+ # Refresh the credentials, but only once per refresh_token.
+ # If the request has no http object of its own, refresh the credentials
+ # on the http object passed in via execute().
+ creds = None
+ request_credentials = False
+
+ if request.http is not None:
+ creds = _auth.get_credentials_from_http(request.http)
+ request_credentials = True
+
+ if creds is None and http is not None:
+ creds = _auth.get_credentials_from_http(http)
+
+ if creds is not None:
+ if id(creds) not in self._refreshed_credentials:
+ _auth.refresh_credentials(creds)
+ self._refreshed_credentials[id(creds)] = 1
+
+ # Only apply the credentials if we are using the http object passed in,
+ # otherwise apply() will get called during _serialize_request().
+ if request.http is None or not request_credentials:
+ _auth.apply_credentials(creds, request.headers)
+
+
+ def _id_to_header(self, id_):
+ """Convert an id to a Content-ID header value.
+
+ Args:
+ id_: string, identifier of individual request.
+
+ Returns:
+ A Content-ID header with the id_ encoded into it. A UUID is prepended to
+ the value because Content-ID headers are supposed to be universally
+ unique.
+ """
+ if self._base_id is None:
+ self._base_id = uuid.uuid4()
+
+ # NB: we intentionally leave whitespace between base/id and '+', so RFC2822
+ # line folding works properly on Python 3; see
+ # https://github.com/google/google-api-python-client/issues/164
+ return '<%s + %s>' % (self._base_id, quote(id_))
+
+ def _header_to_id(self, header):
+ """Convert a Content-ID header value to an id.
+
+ Presumes the Content-ID header conforms to the format that _id_to_header()
+ returns.
+
+ Args:
+ header: string, Content-ID header value.
+
+ Returns:
+ The extracted id value.
+
+ Raises:
+ BatchError if the header is not in the expected format.
+ """
+ if header[0] != '<' or header[-1] != '>':
+ raise BatchError("Invalid value for Content-ID: %s" % header)
+ if '+' not in header:
+ raise BatchError("Invalid value for Content-ID: %s" % header)
+ base, id_ = header[1:-1].split(' + ', 1)
+
+ return unquote(id_)
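+ # Round-trip sketch: _header_to_id(self._id_to_header('42')) returns '42';
+ # the uuid prefix before ' + ' is discarded when parsing.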
+
+ def _serialize_request(self, request):
+ """Convert an HttpRequest object into a string.
+
+ Args:
+ request: HttpRequest, the request to serialize.
+
+ Returns:
+ The request as a string in application/http format.
+ """
+ # Construct status line
+ parsed = urlparse(request.uri)
+ request_line = urlunparse(
+ ('', '', parsed.path, parsed.params, parsed.query, '')
+ )
+ status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
+ major, minor = request.headers.get('content-type', 'application/json').split('/')
+ msg = MIMENonMultipart(major, minor)
+ headers = request.headers.copy()
+
+ if request.http is not None:
+ credentials = _auth.get_credentials_from_http(request.http)
+ if credentials is not None:
+ _auth.apply_credentials(credentials, headers)
+
+ # MIMENonMultipart adds its own Content-Type header.
+ if 'content-type' in headers:
+ del headers['content-type']
+
+ for key, value in six.iteritems(headers):
+ msg[key] = value
+ msg['Host'] = parsed.netloc
+ msg.set_unixfrom(None)
+
+ if request.body is not None:
+ msg.set_payload(request.body)
+ msg['content-length'] = str(len(request.body))
+
+ # Serialize the mime message.
+ fp = StringIO()
+ # maxheaderlen=0 means don't line wrap headers.
+ g = Generator(fp, maxheaderlen=0)
+ g.flatten(msg, unixfrom=False)
+ body = fp.getvalue()
+
+ return status_line + body
+
+ def _deserialize_response(self, payload):
+ """Convert string into httplib2 response and content.
+
+ Args:
+ payload: string, headers and body as a string.
+
+ Returns:
+ A pair (resp, content), such as would be returned from httplib2.request.
+ """
+ # Strip off the status line
+ status_line, payload = payload.split('\n', 1)
+ protocol, status, reason = status_line.split(' ', 2)
+
+ # Parse the rest of the response
+ parser = FeedParser()
+ parser.feed(payload)
+ msg = parser.close()
+ msg['status'] = status
+
+ # Create httplib2.Response from the parsed headers.
+ resp = httplib2.Response(msg)
+ resp.reason = reason
+ resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
+
+ content = payload.split('\r\n\r\n', 1)[1]
+
+ return resp, content
+
+ def _new_id(self):
+ """Create a new id.
+
+ An auto-incrementing number that avoids conflicts with ids already in use.
+
+ Returns:
+ string, a new unique id.
+ """
+ self._last_auto_id += 1
+ while str(self._last_auto_id) in self._requests:
+ self._last_auto_id += 1
+ return str(self._last_auto_id)
+
+ @util.positional(2)
+ def add(self, request, callback=None, request_id=None):
+ """Add a new request.
+
+ Every callback added will be paired with a unique id, the request_id. That
+ unique id will be passed back to the callback when the response comes back
+ from the server. The default behavior is to have the library generate its
+ own unique id. If the caller passes in a request_id then they must ensure
+ uniqueness for each request_id; if any is not unique, an exception is
+ raised. Callers should either supply all request_ids or never supply a
+ request_id, to avoid such an error.
+
+ Args:
+ request: HttpRequest, Request to add to the batch.
+ callback: callable, A callback to be called for this response, of the
+ form callback(id, response, exception). The first parameter is the
+ request id, and the second is the deserialized response object. The
+ third is an googleapiclient.errors.HttpError exception object if an HTTP error
+ occurred while processing the request, or None if no errors occurred.
+ request_id: string, A unique id for the request. The id will be passed
+ to the callback with the response.
+
+ Returns:
+ None
+
+ Raises:
+ BatchError if a media request is added to a batch.
+ KeyError if the request_id is not unique.
+ """
+
+ if len(self._order) >= MAX_BATCH_LIMIT:
+ raise BatchError("Exceeded the maximum calls(%d) in a single batch request."
+ % MAX_BATCH_LIMIT)
+ if request_id is None:
+ request_id = self._new_id()
+ if request.resumable is not None:
+ raise BatchError("Media requests cannot be used in a batch request.")
+ if request_id in self._requests:
+ raise KeyError("A request with this ID already exists: %s" % request_id)
+ self._requests[request_id] = request
+ self._callbacks[request_id] = callback
+ self._order.append(request_id)
+
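+ # A minimal sketch of add() usage (names are illustrative; 'service' is a
+ # discovery-built service object, not defined in this module):
+ #
+ # def on_response(request_id, response, exception):
+ # if exception is not None:
+ # logging.error('request %s failed: %s', request_id, exception)
+ #
+ # batch.add(service.objects().get(bucket='b', object='o1'),
+ # callback=on_response)
+ # batch.add(service.objects().get(bucket='b', object='o2'),
+ # callback=on_response, request_id='my-id')
+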
+ def _execute(self, http, order, requests):
+ """Serialize batch request, send to server, process response.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the request with.
+ order: list, list of request ids in the order they were added to the
+ batch.
+ requests: dict, map of request id to request object to send.
+
+ Raises:
+ httplib2.HttpLib2Error if a transport error has occurred.
+ googleapiclient.errors.BatchError if the response is the wrong format.
+ """
+ message = MIMEMultipart('mixed')
+ # Message should not write out its own headers.
+ setattr(message, '_write_headers', lambda self: None)
+
+ # Add all the individual requests.
+ for request_id in order:
+ request = requests[request_id]
+
+ msg = MIMENonMultipart('application', 'http')
+ msg['Content-Transfer-Encoding'] = 'binary'
+ msg['Content-ID'] = self._id_to_header(request_id)
+
+ body = self._serialize_request(request)
+ msg.set_payload(body)
+ message.attach(msg)
+
+ # encode the body: note that we can't use `as_string`, because
+ # it plays games with `From ` lines.
+ fp = StringIO()
+ g = Generator(fp, mangle_from_=False)
+ g.flatten(message, unixfrom=False)
+ body = fp.getvalue()
+
+ headers = {}
+ headers['content-type'] = ('multipart/mixed; '
+ 'boundary="%s"') % message.get_boundary()
+
+ resp, content = http.request(self._batch_uri, method='POST', body=body,
+ headers=headers)
+
+ if resp.status >= 300:
+ raise HttpError(resp, content, uri=self._batch_uri)
+
+ # Prepend with a content-type header so FeedParser can handle it.
+ header = 'content-type: %s\r\n\r\n' % resp['content-type']
+ # PY3's FeedParser only accepts unicode. So we should decode content
+ # here, and encode each payload again.
+ if six.PY3:
+ content = content.decode('utf-8')
+ for_parser = header + content
+
+ parser = FeedParser()
+ parser.feed(for_parser)
+ mime_response = parser.close()
+
+ if not mime_response.is_multipart():
+ raise BatchError("Response not in multipart/mixed format.", resp=resp,
+ content=content)
+
+ for part in mime_response.get_payload():
+ request_id = self._header_to_id(part['Content-ID'])
+ response, content = self._deserialize_response(part.get_payload())
+ # We encode content here to emulate normal http response.
+ if isinstance(content, six.text_type):
+ content = content.encode('utf-8')
+ self._responses[request_id] = (response, content)
+
+ @util.positional(1)
+ def execute(self, http=None):
+ """Execute all the requests as a single batched HTTP request.
+
+ Args:
+ http: httplib2.Http, an http object to be used in place of the one the
+ HttpRequest request object was constructed with. If one isn't supplied
+ then use a http object from the requests in this batch.
+
+ Returns:
+ None
+
+ Raises:
+ httplib2.HttpLib2Error if a transport error has occurred.
+ googleapiclient.errors.BatchError if the response is the wrong format.
+ """
+ # If we have no requests return
+ if len(self._order) == 0:
+ return None
+
+ # If http is not supplied use the first valid one given in the requests.
+ if http is None:
+ for request_id in self._order:
+ request = self._requests[request_id]
+ if request is not None:
+ http = request.http
+ break
+
+ if http is None:
+ raise ValueError("Missing a valid http object.")
+
+ # Special case for OAuth2Credentials-style objects which have not yet been
+ # refreshed with an initial access_token.
+ creds = _auth.get_credentials_from_http(http)
+ if creds is not None:
+ if not _auth.is_valid(creds):
+ LOGGER.info('Attempting refresh to obtain initial access_token')
+ _auth.refresh_credentials(creds)
+
+ self._execute(http, self._order, self._requests)
+
+ # Loop over all the requests and check for 401s. For each 401 request the
+ # credentials should be refreshed and then sent again in a separate batch.
+ redo_requests = {}
+ redo_order = []
+
+ for request_id in self._order:
+ resp, content = self._responses[request_id]
+ if resp['status'] == '401':
+ redo_order.append(request_id)
+ request = self._requests[request_id]
+ self._refresh_and_apply_credentials(request, http)
+ redo_requests[request_id] = request
+
+ if redo_requests:
+ self._execute(http, redo_order, redo_requests)
+
+ # Process all callbacks, passing along an HttpError exception for any
+ # request that returned a non-2xx response.
+
+ for request_id in self._order:
+ resp, content = self._responses[request_id]
+
+ request = self._requests[request_id]
+ callback = self._callbacks[request_id]
+
+ response = None
+ exception = None
+ try:
+ if resp.status >= 300:
+ raise HttpError(resp, content, uri=request.uri)
+ response = request.postproc(resp, content)
+ except HttpError as e:
+ exception = e
+
+ if callback is not None:
+ callback(request_id, response, exception)
+ if self._callback is not None:
+ self._callback(request_id, response, exception)
+
+
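+# An end-to-end batch sketch (all names except BatchHttpRequest are assumed:
+# 'service' is a discovery-built service and 'on_response' a callback):
+#
+# batch = BatchHttpRequest(callback=on_response,
+# batch_uri='https://www.googleapis.com/batch')
+# batch.add(service.events().list(calendarId='primary'))
+# batch.execute()
+
+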
+class HttpRequestMock(object):
+ """Mock of HttpRequest.
+
+ Do not construct directly, instead use RequestMockBuilder.
+ """
+
+ def __init__(self, resp, content, postproc):
+ """Constructor for HttpRequestMock
+
+ Args:
+ resp: httplib2.Response, the response to emulate coming from the request
+ content: string, the response body
+ postproc: callable, the post processing function usually supplied by
+ the model class. See model.JsonModel.response() as an example.
+ """
+ self.resp = resp
+ self.content = content
+ self.postproc = postproc
+ if resp is None:
+ self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
+ if 'reason' in self.resp:
+ self.resp.reason = self.resp['reason']
+
+ def execute(self, http=None):
+ """Execute the request.
+
+ Same behavior as HttpRequest.execute(), but the response is
+ mocked and not really from an HTTP request/response.
+ """
+ return self.postproc(self.resp, self.content)
+
+
+class RequestMockBuilder(object):
+ """A simple mock of HttpRequest
+
+ Pass in a dictionary to the constructor that maps request methodIds to
+ tuples of (httplib2.Response, content, opt_expected_body) that should be
+ returned when that method is called. None may also be passed in for the
+ httplib2.Response, in which case a 200 OK response will be generated.
+ If an opt_expected_body (str or dict) is provided, it will be compared to
+ the body and UnexpectedBodyError will be raised on inequality.
+
+ Example:
+ response = '{"data": {"id": "tag:google.c...'
+ requestBuilder = RequestMockBuilder(
+ {
+ 'plus.activities.get': (None, response),
+ }
+ )
+ googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
+
+ Methods that you do not supply a response for will return a
+ 200 OK with an empty string as the response content or raise an exception
+ if check_unexpected is set to True. The methodId is taken from the rpcName
+ in the discovery document.
+
+ For more details see the project wiki.
+ """
+
+ def __init__(self, responses, check_unexpected=False):
+ """Constructor for RequestMockBuilder
+
+ The constructed object should be a callable object
+ that can replace the class HttpRequest.
+
+ responses - A dictionary that maps methodIds into tuples
+ of (httplib2.Response, content). The methodId
+ comes from the 'rpcName' field in the discovery
+ document.
+ check_unexpected - A boolean setting whether or not UnexpectedMethodError
+ should be raised when a method with no supplied response is called.
+ """
+ self.responses = responses
+ self.check_unexpected = check_unexpected
+
+ def __call__(self, http, postproc, uri, method='GET', body=None,
+ headers=None, methodId=None, resumable=None):
+ """Implements the callable interface that discovery.build() expects
+ of requestBuilder, which is to build an object compatible with
+ HttpRequest.execute(). See that method for the description of the
+ parameters and the expected response.
+ """
+ if methodId in self.responses:
+ response = self.responses[methodId]
+ resp, content = response[:2]
+ if len(response) > 2:
+ # Test the body against the supplied expected_body.
+ expected_body = response[2]
+ if bool(expected_body) != bool(body):
+ # Not expecting a body and provided one
+ # or expecting a body and not provided one.
+ raise UnexpectedBodyError(expected_body, body)
+ if isinstance(expected_body, str):
+ expected_body = json.loads(expected_body)
+ body = json.loads(body)
+ if body != expected_body:
+ raise UnexpectedBodyError(expected_body, body)
+ return HttpRequestMock(resp, content, postproc)
+ elif self.check_unexpected:
+ raise UnexpectedMethodError(methodId=methodId)
+ else:
+ model = JsonModel(False)
+ return HttpRequestMock(None, '{}', model.response)
+
+
+class HttpMock(object):
+ """Mock of httplib2.Http"""
+
+ def __init__(self, filename=None, headers=None):
+ """
+ Args:
+ filename: string, absolute filename to read response from
+ headers: dict, header to return with response
+ """
+ if headers is None:
+ headers = {'status': '200'}
+ if filename:
+ f = open(filename, 'rb')
+ self.data = f.read()
+ f.close()
+ else:
+ self.data = None
+ self.response_headers = headers
+ self.headers = None
+ self.uri = None
+ self.method = None
+ self.body = None
+
+
+ def request(self, uri,
+ method='GET',
+ body=None,
+ headers=None,
+ redirections=1,
+ connection_type=None):
+ self.uri = uri
+ self.method = method
+ self.body = body
+ self.headers = headers
+ return httplib2.Response(self.response_headers), self.data
+
+
+class HttpMockSequence(object):
+ """Mock of httplib2.Http
+
+ Mocks a sequence of calls to request returning different responses for each
+ call. Create an instance initialized with the desired response headers
+ and content, and then use it as if it were an httplib2.Http instance.
+
+ http = HttpMockSequence([
+ ({'status': '401'}, ''),
+ ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
+ ({'status': '200'}, 'echo_request_headers'),
+ ])
+ resp, content = http.request("http://examples.com")
+
+ There are special values you can pass in for content to trigger
+ behaviours that are helpful in testing.
+
+ 'echo_request_headers' means return the request headers in the response body
+ 'echo_request_headers_as_json' means return the request headers, encoded
+ as JSON, in the response body
+ 'echo_request_body' means return the request body in the response body
+ 'echo_request_uri' means return the request uri in the response body
+ """
+
+ def __init__(self, iterable):
+ """
+ Args:
+ iterable: iterable, a sequence of pairs of (headers, body)
+ """
+ self._iterable = iterable
+ self.follow_redirects = True
+
+ def request(self, uri,
+ method='GET',
+ body=None,
+ headers=None,
+ redirections=1,
+ connection_type=None):
+ resp, content = self._iterable.pop(0)
+ if content == 'echo_request_headers':
+ content = headers
+ elif content == 'echo_request_headers_as_json':
+ content = json.dumps(headers)
+ elif content == 'echo_request_body':
+ if hasattr(body, 'read'):
+ content = body.read()
+ else:
+ content = body
+ elif content == 'echo_request_uri':
+ content = uri
+ if isinstance(content, six.text_type):
+ content = content.encode('utf-8')
+ return httplib2.Response(resp), content
+
+
+def set_user_agent(http, user_agent):
+ """Set the user-agent on every request.
+
+ Args:
+ http: An instance of httplib2.Http, or something that acts like it.
+ user_agent: string, the value for the user-agent header.
+
+ Returns:
+ A modified instance of http that was passed in.
+
+ Example:
+
+ h = httplib2.Http()
+ h = set_user_agent(h, "my-app-name/6.0")
+
+ Most of the time the user-agent will be set during auth; this is for the rare
+ cases where you are accessing an unauthenticated endpoint.
+ """
+ request_orig = http.request
+
+ # The closure that will replace 'httplib2.Http.request'.
+ def new_request(uri, method='GET', body=None, headers=None,
+ redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+ connection_type=None):
+ """Modify the request headers to add the user-agent."""
+ if headers is None:
+ headers = {}
+ if 'user-agent' in headers:
+ headers['user-agent'] = user_agent + ' ' + headers['user-agent']
+ else:
+ headers['user-agent'] = user_agent
+ resp, content = request_orig(uri, method=method, body=body, headers=headers,
+ redirections=redirections, connection_type=connection_type)
+ return resp, content
+
+ http.request = new_request
+ return http
+
+
+def tunnel_patch(http):
+ """Tunnel PATCH requests over POST.
+
+ Args:
+ http: An instance of httplib2.Http, or something that acts like it.
+
+ Returns:
+ A modified instance of http that was passed in.
+
+ Example:
+
+ h = httplib2.Http()
+ h = tunnel_patch(h)
+
+ Useful if you are running on a platform that doesn't support PATCH.
+ Apply this last if you are using OAuth 1.0, as changing the method
+ will result in a different signature.
+ """
+ request_orig = http.request
+
+ # The closure that will replace 'httplib2.Http.request'.
+ def new_request(uri, method='GET', body=None, headers=None,
+ redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+ connection_type=None):
+ """Modify the request headers to add the user-agent."""
+ if headers is None:
+ headers = {}
+ if method == 'PATCH':
+ if 'oauth_token' in headers.get('authorization', ''):
+ LOGGER.warning(
+ 'OAuth 1.0 request made with Credentials after tunnel_patch.')
+ headers['x-http-method-override'] = "PATCH"
+ method = 'POST'
+ resp, content = request_orig(uri, method=method, body=body, headers=headers,
+ redirections=redirections, connection_type=connection_type)
+ return resp, content
+
+ http.request = new_request
+ return http
+
+
+def build_http():
+ """Builds httplib2.Http object
+
+ Returns:
+ A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
+ To override default timeout call
+
+ socket.setdefaulttimeout(timeout_in_sec)
+
+ before interacting with this method.
+ """
+ if socket.getdefaulttimeout() is not None:
+ http_timeout = socket.getdefaulttimeout()
+ else:
+ http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
+ return httplib2.Http(timeout=http_timeout)
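+
+
+# Example: to change the default timeout, set the socket default timeout
+# before building the object (60 here is an arbitrary value):
+#
+# import socket
+# socket.setdefaulttimeout(60)
+# http = build_http() # equivalent to httplib2.Http(timeout=60)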
diff --git a/googleapiclient/mimeparse.py b/googleapiclient/mimeparse.py
new file mode 100644
index 0000000..bc9ad09
--- /dev/null
+++ b/googleapiclient/mimeparse.py
@@ -0,0 +1,175 @@
+# Copyright 2014 Joe Gregorio
+#
+# Licensed under the MIT License
+
+"""MIME-Type Parser
+
+This module provides basic functions for handling mime-types. It can handle
+matching mime-types against a list of media-ranges. See section 14.1 of the
+HTTP specification [RFC 2616] for a complete explanation.
+
+ http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
+
+Contents:
+ - parse_mime_type(): Parses a mime-type into its component parts.
+ - parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
+ quality parameter.
+ - quality(): Determines the quality ('q') of a mime-type when
+ compared against a list of media-ranges.
+ - quality_parsed(): Just like quality() except the second parameter must be
+ pre-parsed.
+ - best_match(): Choose the mime-type with the highest quality ('q')
+ from a list of candidates.
+"""
+from __future__ import absolute_import
+from functools import reduce
+import six
+
+__version__ = '0.1.3'
+__author__ = 'Joe Gregorio'
+__email__ = 'joe@bitworking.org'
+__license__ = 'MIT License'
+__credits__ = ''
+
+
+def parse_mime_type(mime_type):
+ """Parses a mime-type into its component parts.
+
+ Carves up a mime-type and returns a tuple of the (type, subtype, params)
+ where 'params' is a dictionary of all the parameters for the media range.
+ For example, the media range 'application/xhtml;q=0.5' would get parsed
+ into:
+
+ ('application', 'xhtml', {'q': '0.5'})
+ """
+ parts = mime_type.split(';')
+ params = dict([tuple([s.strip() for s in param.split('=', 1)])\
+ for param in parts[1:]
+ ])
+ full_type = parts[0].strip()
+ # Java URLConnection class sends an Accept header that includes a
+ # single '*'. Turn it into a legal wildcard.
+ if full_type == '*':
+ full_type = '*/*'
+ (type, subtype) = full_type.split('/')
+
+ return (type.strip(), subtype.strip(), params)
+
+
+def parse_media_range(range):
+ """Parse a media-range into its component parts.
+
+ Carves up a media range and returns a tuple of the (type, subtype,
+ params) where 'params' is a dictionary of all the parameters for the media
+ range. For example, the media range 'application/*;q=0.5' would get parsed
+ into:
+
+ ('application', '*', {'q': '0.5'})
+
+ In addition this function also guarantees that there is a value for 'q'
+ in the params dictionary, filling it in with a proper default if
+ necessary.
+ """
+ (type, subtype, params) = parse_mime_type(range)
+ if 'q' not in params or not params['q'] or \
+ not float(params['q']) or float(params['q']) > 1\
+ or float(params['q']) < 0:
+ params['q'] = '1'
+
+ return (type, subtype, params)
+
+
+def fitness_and_quality_parsed(mime_type, parsed_ranges):
+ """Find the best match for a mime-type amongst parsed media-ranges.
+
+ Find the best match for a given mime-type against a list of media_ranges
+ that have already been parsed by parse_media_range(). Returns a tuple of
+ the fitness value and the value of the 'q' quality parameter of the best
+ match, or (-1, 0) if no match was found. Just as for quality_parsed(),
+ 'parsed_ranges' must be a list of parsed media ranges.
+ """
+ best_fitness = -1
+ best_fit_q = 0
+ (target_type, target_subtype, target_params) =\
+ parse_media_range(mime_type)
+ for (type, subtype, params) in parsed_ranges:
+ type_match = (type == target_type or\
+ type == '*' or\
+ target_type == '*')
+ subtype_match = (subtype == target_subtype or\
+ subtype == '*' or\
+ target_subtype == '*')
+ if type_match and subtype_match:
+ param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in \
+ six.iteritems(target_params) if key != 'q' and \
+ key in params and value == params[key]], 0)
+ fitness = (type == target_type) and 100 or 0
+ fitness += (subtype == target_subtype) and 10 or 0
+ fitness += param_matches
+ if fitness > best_fitness:
+ best_fitness = fitness
+ best_fit_q = params['q']
+
+ return best_fitness, float(best_fit_q)
+
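+# A worked example of the scoring above: matching 'text/html' against the
+# parsed ranges of 'text/*;q=0.3, text/html;q=0.7' scores fitness 100 for
+# the wildcard range (exact type only) and 110 for the exact range (type
+# +100, subtype +10), so the function returns (110, 0.7).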
+
+def quality_parsed(mime_type, parsed_ranges):
+ """Find the best match for a mime-type amongst parsed media-ranges.
+
+ Find the best match for a given mime-type against a list of media_ranges
+ that have already been parsed by parse_media_range(). Returns the 'q'
+ quality parameter of the best match, or 0 if no match was found. This function
+ behaves the same as quality() except that 'parsed_ranges' must be a list of
+ parsed media ranges.
+ """
+
+ return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
+
+
+def quality(mime_type, ranges):
+ """Return the quality ('q') of a mime-type against a list of media-ranges.
+
+ Returns the quality 'q' of a mime-type when compared against the
+ media-ranges in ranges. For example:
+
+ >>> quality('text/html', 'text/*;q=0.3, text/html;q=0.7, '
+ ... 'text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
+ 0.7
+
+ """
+ parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
+
+ return quality_parsed(mime_type, parsed_ranges)
+
+
+def best_match(supported, header):
+ """Return mime-type with the highest quality ('q') from list of candidates.
+
+ Takes a list of supported mime-types and finds the best match for all the
+ media-ranges listed in header. The value of header must be a string that
+ conforms to the format of the HTTP Accept: header. The value of 'supported'
+ is a list of mime-types. The list of supported mime-types should be sorted
+ in order of increasing desirability, in case of a situation where there is
+ a tie.
+
+ >>> best_match(['application/xbel+xml', 'text/xml'],
+ ... 'text/*;q=0.5,*/*; q=0.1')
+ 'text/xml'
+ """
+ split_header = _filter_blank(header.split(','))
+ parsed_header = [parse_media_range(r) for r in split_header]
+ weighted_matches = []
+ for pos, mime_type in enumerate(supported):
+ weighted_matches.append((fitness_and_quality_parsed(mime_type,
+ parsed_header), pos, mime_type))
+ weighted_matches.sort()
+
+ return weighted_matches[-1][2] if weighted_matches[-1][0][1] else ''
+
+
+def _filter_blank(i):
+ for s in i:
+ if s.strip():
+ yield s
diff --git a/googleapiclient/model.py b/googleapiclient/model.py
new file mode 100644
index 0000000..dded04e
--- /dev/null
+++ b/googleapiclient/model.py
@@ -0,0 +1,389 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Model objects for requests and responses.
+
+Each API may support one or more serializations, such
+as JSON, Atom, etc. The model classes are responsible
+for converting between the wire format and the Python
+object representation.
+"""
+from __future__ import absolute_import
+import six
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+
+import json
+import logging
+
+from six.moves.urllib.parse import urlencode
+
+from googleapiclient import __version__
+from googleapiclient.errors import HttpError
+
+
+LOGGER = logging.getLogger(__name__)
+
+dump_request_response = False
+
+
+def _abstract():
+ raise NotImplementedError('You need to override this function')
+
+
+class Model(object):
+ """Model base class.
+
+ All Model classes should implement this interface.
+ The Model serializes and de-serializes between a wire
+ format such as JSON and a Python object representation.
+ """
+
+ def request(self, headers, path_params, query_params, body_value):
+ """Updates outgoing requests with a serialized body.
+
+ Args:
+ headers: dict, request headers
+ path_params: dict, parameters that appear in the request path
+ query_params: dict, parameters that appear in the query
+ body_value: object, the request body as a Python object, which must be
+ serializable.
+ Returns:
+ A tuple of (headers, path_params, query, body)
+
+ headers: dict, request headers
+ path_params: dict, parameters that appear in the request path
+ query: string, query part of the request URI
+ body: string, the body serialized in the desired wire format.
+ """
+ _abstract()
+
+ def response(self, resp, content):
+ """Convert the response wire format into a Python object.
+
+ Args:
+ resp: httplib2.Response, the HTTP response headers and status
+ content: string, the body of the HTTP response
+
+ Returns:
+ The body de-serialized as a Python object.
+
+ Raises:
+ googleapiclient.errors.HttpError if a non 2xx response is received.
+ """
+ _abstract()
+
+
+class BaseModel(Model):
+ """Base model class.
+
+ Subclasses should provide implementations for the "serialize" and
+ "deserialize" methods, as well as values for the following class attributes.
+
+ Attributes:
+ accept: The value to use for the HTTP Accept header.
+ content_type: The value to use for the HTTP Content-type header.
+ no_content_response: The value to return when deserializing a 204 "No
+ Content" response.
+ alt_param: The value to supply as the "alt" query parameter for requests.
+ """
+
+ accept = None
+ content_type = None
+ no_content_response = None
+ alt_param = None
+
+ def _log_request(self, headers, path_params, query, body):
+ """Logs debugging information about the request if requested."""
+ if dump_request_response:
+ LOGGER.info('--request-start--')
+ LOGGER.info('-headers-start-')
+ for h, v in six.iteritems(headers):
+ LOGGER.info('%s: %s', h, v)
+ LOGGER.info('-headers-end-')
+ LOGGER.info('-path-parameters-start-')
+ for h, v in six.iteritems(path_params):
+ LOGGER.info('%s: %s', h, v)
+ LOGGER.info('-path-parameters-end-')
+ LOGGER.info('body: %s', body)
+ LOGGER.info('query: %s', query)
+ LOGGER.info('--request-end--')
+
+ def request(self, headers, path_params, query_params, body_value):
+ """Updates outgoing requests with a serialized body.
+
+ Args:
+ headers: dict, request headers
+ path_params: dict, parameters that appear in the request path
+ query_params: dict, parameters that appear in the query
+ body_value: object, the request body as a Python object, which must be
+ serializable by json.
+ Returns:
+ A tuple of (headers, path_params, query, body)
+
+ headers: dict, request headers
+ path_params: dict, parameters that appear in the request path
+ query: string, query part of the request URI
+ body: string, the body serialized as JSON
+ """
+ query = self._build_query(query_params)
+ headers['accept'] = self.accept
+ headers['accept-encoding'] = 'gzip, deflate'
+ if 'user-agent' in headers:
+ headers['user-agent'] += ' '
+ else:
+ headers['user-agent'] = ''
+ headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__
+
+ if body_value is not None:
+ headers['content-type'] = self.content_type
+ body_value = self.serialize(body_value)
+ self._log_request(headers, path_params, query, body_value)
+ return (headers, path_params, query, body_value)
+
+ def _build_query(self, params):
+ """Builds a query string.
+
+ Args:
+ params: dict, the query parameters
+
+ Returns:
+ The query parameters properly encoded into an HTTP URI query string.
+ """
+ if self.alt_param is not None:
+ params.update({'alt': self.alt_param})
+ astuples = []
+ for key, value in six.iteritems(params):
+ if isinstance(value, list):
+ for x in value:
+ x = x.encode('utf-8')
+ astuples.append((key, x))
+ else:
+ if isinstance(value, six.text_type) and callable(value.encode):
+ value = value.encode('utf-8')
+ astuples.append((key, value))
+ return '?' + urlencode(astuples)
+
+ def _log_response(self, resp, content):
+ """Logs debugging information about the response if requested."""
+ if dump_request_response:
+ LOGGER.info('--response-start--')
+ for h, v in six.iteritems(resp):
+ LOGGER.info('%s: %s', h, v)
+ if content:
+ LOGGER.info(content)
+ LOGGER.info('--response-end--')
+
+ def response(self, resp, content):
+ """Convert the response wire format into a Python object.
+
+ Args:
+ resp: httplib2.Response, the HTTP response headers and status
+ content: string, the body of the HTTP response
+
+ Returns:
+ The body de-serialized as a Python object.
+
+ Raises:
+ googleapiclient.errors.HttpError if a non 2xx response is received.
+ """
+ self._log_response(resp, content)
+ # Error handling is TBD, for example, do we retry
+ # for some operation/error combinations?
+ if resp.status < 300:
+ if resp.status == 204:
+ # A 204: No Content response should be treated differently
+ # to all the other success states
+ return self.no_content_response
+ return self.deserialize(content)
+ else:
+ LOGGER.debug('Content from bad request was: %s' % content)
+ raise HttpError(resp, content)
+
+ def serialize(self, body_value):
+ """Perform the actual Python object serialization.
+
+ Args:
+ body_value: object, the request body as a Python object.
+
+ Returns:
+ string, the body in serialized form.
+ """
+ _abstract()
+
+ def deserialize(self, content):
+ """Perform the actual deserialization from response string to Python
+ object.
+
+ Args:
+ content: string, the body of the HTTP response
+
+ Returns:
+ The body de-serialized as a Python object.
+ """
+ _abstract()
+
+
+class JsonModel(BaseModel):
+ """Model class for JSON.
+
+ Serializes and de-serializes between JSON and the Python
+ object representation of HTTP request and response bodies.
+ """
+ accept = 'application/json'
+ content_type = 'application/json'
+ alt_param = 'json'
+
+ def __init__(self, data_wrapper=False):
+ """Construct a JsonModel.
+
+ Args:
+ data_wrapper: boolean, wrap requests and responses in a data wrapper
+ """
+ self._data_wrapper = data_wrapper
+
+ def serialize(self, body_value):
+ if (isinstance(body_value, dict) and 'data' not in body_value and
+ self._data_wrapper):
+ body_value = {'data': body_value}
+ return json.dumps(body_value)
+
+ def deserialize(self, content):
+ try:
+ content = content.decode('utf-8')
+ except AttributeError:
+ pass
+ body = json.loads(content)
+ if self._data_wrapper and isinstance(body, dict) and 'data' in body:
+ body = body['data']
+ return body
+
+ @property
+ def no_content_response(self):
+ return {}
+
+
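+# How the data wrapper behaves (a sketch, derivable from the code above):
+#
+# model = JsonModel(data_wrapper=True)
+# model.serialize({'id': 1}) # -> '{"data": {"id": 1}}'
+# model.deserialize('{"data": {"id": 1}}') # -> {'id': 1}
+
+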
+class RawModel(JsonModel):
+ """Model class for requests that don't return JSON.
+
+ Serializes and de-serializes between JSON and the Python
+ object representation of HTTP request, and returns the raw bytes
+ of the response body.
+ """
+ accept = '*/*'
+ content_type = 'application/json'
+ alt_param = None
+
+ def deserialize(self, content):
+ return content
+
+ @property
+ def no_content_response(self):
+ return ''
+
+
+class MediaModel(JsonModel):
+ """Model class for requests that return Media.
+
+ Serializes and de-serializes between JSON and the Python
+ object representation of HTTP request, and returns the raw bytes
+ of the response body.
+ """
+ accept = '*/*'
+ content_type = 'application/json'
+ alt_param = 'media'
+
+ def deserialize(self, content):
+ return content
+
+ @property
+ def no_content_response(self):
+ return ''
+
+
+class ProtocolBufferModel(BaseModel):
+ """Model class for protocol buffers.
+
+ Serializes and de-serializes the binary protocol buffer sent in the HTTP
+ request and response bodies.
+ """
+ accept = 'application/x-protobuf'
+ content_type = 'application/x-protobuf'
+ alt_param = 'proto'
+
+ def __init__(self, protocol_buffer):
+ """Constructs a ProtocolBufferModel.
+
+ The serialized protocol buffer returned in an HTTP response will be
+ de-serialized using the given protocol buffer class.
+
+ Args:
+ protocol_buffer: The protocol buffer class used to de-serialize a
+ response from the API.
+ """
+ self._protocol_buffer = protocol_buffer
+
+ def serialize(self, body_value):
+ return body_value.SerializeToString()
+
+ def deserialize(self, content):
+ return self._protocol_buffer.FromString(content)
+
+ @property
+ def no_content_response(self):
+ return self._protocol_buffer()
+
+
+def makepatch(original, modified):
+ """Create a patch object.
+
+ Some methods support PATCH, an efficient way to send updates to a resource.
+ This method allows the easy construction of patch bodies by looking at the
+ differences between a resource before and after it was modified.
+
+ Args:
+ original: object, the original deserialized resource
+ modified: object, the modified deserialized resource
+ Returns:
+ An object that contains only the changes from original to modified, in a
+ form suitable to pass to a PATCH method.
+
+ Example usage:
+ item = service.activities().get(postid=postid, userid=userid).execute()
+ original = copy.deepcopy(item)
+ item['object']['content'] = 'This is updated.'
+ service.activities.patch(postid=postid, userid=userid,
+ body=makepatch(original, item)).execute()
+ """
+ patch = {}
+ for key, original_value in six.iteritems(original):
+ modified_value = modified.get(key, None)
+ if modified_value is None:
+ # Use None to signal that the element is deleted
+ patch[key] = None
+ elif original_value != modified_value:
+ if isinstance(original_value, dict):
+ # Recursively descend objects
+ patch[key] = makepatch(original_value, modified_value)
+ else:
+ # In the case of simple types or arrays we just replace
+ patch[key] = modified_value
+ else:
+ # Don't add anything to patch if there's no change
+ pass
+ for key in modified:
+ if key not in original:
+ patch[key] = modified[key]
+
+ return patch
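+
+# A concrete example of the diff rules above: unchanged keys are omitted,
+# changed keys are copied, and keys added in 'modified' are included.
+#
+# makepatch({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
+# # -> {'b': 3, 'c': 4}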
diff --git a/googleapiclient/sample_tools.py b/googleapiclient/sample_tools.py
new file mode 100644
index 0000000..5cb7a06
--- /dev/null
+++ b/googleapiclient/sample_tools.py
@@ -0,0 +1,106 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for making samples.
+
+Consolidates a lot of code commonly repeated in sample applications.
+"""
+from __future__ import absolute_import
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__all__ = ['init']
+
+
+import argparse
+import os
+
+from googleapiclient import discovery
+from googleapiclient.http import build_http
+
+def init(argv, name, version, doc, filename, scope=None, parents=[], discovery_filename=None):
+ """A common initialization routine for samples.
+
+ Many of the sample applications do the same initialization, which has now
+ been consolidated into this function. This function uses common idioms found
+ in almost all the samples, i.e. for an API with name 'apiname', the
+ credentials are stored in a file named apiname.dat, and the
+ client_secrets.json file is stored in the same directory as the application
+ main file.
+
+ Args:
+ argv: list of string, the command-line parameters of the application.
+ name: string, name of the API.
+ version: string, version of the API.
+ doc: string, description of the application. Usually set to __doc__.
+ filename: string, filename of the application. Usually set to __file__.
+ parents: list of argparse.ArgumentParser, additional command-line flags.
+ scope: string, The OAuth scope used.
+ discovery_filename: string, name of local discovery file (JSON). Use when discovery doc not available via URL.
+
+ Returns:
+ A tuple of (service, flags), where service is the service object and flags
+ is the parsed command-line flags.
+ """
+ try:
+ from oauth2client import client
+ from oauth2client import file
+ from oauth2client import tools
+ except ImportError:
+ raise ImportError('googleapiclient.sample_tools requires oauth2client. Please install oauth2client and try again.')
+
+ if scope is None:
+ scope = 'https://www.googleapis.com/auth/' + name
+
+ # Parse command-line arguments.
+ parent_parsers = [tools.argparser]
+ parent_parsers.extend(parents)
+ parser = argparse.ArgumentParser(
+ description=doc,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ parents=parent_parsers)
+ flags = parser.parse_args(argv[1:])
+
+ # Name of a file containing the OAuth 2.0 information for this
+ # application, including client_id and client_secret, which are found
+ # on the API Access tab on the Google APIs Console.
+ client_secrets = os.path.join(os.path.dirname(filename),
+ 'client_secrets.json')
+
+ # Set up a Flow object to be used if we need to authenticate.
+ flow = client.flow_from_clientsecrets(client_secrets,
+ scope=scope,
+ message=tools.message_if_missing(client_secrets))
+
+ # Prepare credentials, and authorize HTTP object with them.
+ # If the credentials don't exist or are invalid run through the native client
+ # flow. The Storage object will ensure that if successful the good
+ # credentials will get written back to a file.
+ storage = file.Storage(name + '.dat')
+ credentials = storage.get()
+ if credentials is None or credentials.invalid:
+ credentials = tools.run_flow(flow, storage, flags)
+ http = credentials.authorize(http=build_http())
+
+ if discovery_filename is None:
+ # Construct a service object via the discovery service.
+ service = discovery.build(name, version, http=http)
+ else:
+ # Construct a service object using a local discovery document file.
+ with open(discovery_filename) as discovery_file:
+ service = discovery.build_from_document(
+ discovery_file.read(),
+ base='https://www.googleapis.com/',
+ http=http)
+ return (service, flags)
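+
+# Typical invocation from a sample's main() (a sketch; the scope shown is
+# illustrative):
+#
+# import sys
+# service, flags = init(sys.argv, 'calendar', 'v3', __doc__, __file__,
+# scope='https://www.googleapis.com/auth/calendar')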
diff --git a/googleapiclient/schema.py b/googleapiclient/schema.py
new file mode 100644
index 0000000..10d4a1b
--- /dev/null
+++ b/googleapiclient/schema.py
@@ -0,0 +1,314 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Schema processing for discovery based APIs
+
+Schemas holds an APIs discovery schemas. It can return those schema as
+deserialized JSON objects, or pretty print them as prototype objects that
+conform to the schema.
+
+For example, given the schema:
+
+ schema = \"\"\"{
+ "Foo": {
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "ETag of the collection."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the collection ('calendar#acl').",
+ "default": "calendar#acl"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "Token used to access the next
+ page of this result. Omitted if no further results are available."
+ }
+ }
+ }
+ }\"\"\"
+
+ s = Schemas(schema)
+ print(s.prettyPrintByName('Foo'))
+
+ Produces the following output:
+
+ {
+ "nextPageToken": "A String", # Token used to access the
+ # next page of this result. Omitted if no further results are available.
+ "kind": "A String", # Type of the collection ('calendar#acl').
+ "etag": "A String", # ETag of the collection.
+ },
+
+The constructor takes a discovery document in which to look up named schema.
+"""
+from __future__ import absolute_import
+import six
+
+# TODO(jcgregorio) support format, enum, minimum, maximum
+
+__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+
+import copy
+
+from googleapiclient import _helpers as util
+
+
+class Schemas(object):
+ """Schemas for an API."""
+
+ def __init__(self, discovery):
+ """Constructor.
+
+ Args:
+ discovery: object, Deserialized discovery document from which we pull
+ out the named schema.
+ """
+ self.schemas = discovery.get('schemas', {})
+
+ # Cache of pretty printed schemas.
+ self.pretty = {}
+
+ @util.positional(2)
+ def _prettyPrintByName(self, name, seen=None, dent=0):
+ """Get pretty printed object prototype from the schema name.
+
+ Args:
+ name: string, Name of schema in the discovery document.
+ seen: list of string, Names of schema already seen. Used to handle
+ recursive definitions.
+
+ Returns:
+ string, A string that contains a prototype object with
+ comments that conforms to the given schema.
+ """
+ if seen is None:
+ seen = []
+
+ if name in seen:
+ # Do not fall into an infinite loop over recursive definitions.
+ return '# Object with schema name: %s' % name
+ seen.append(name)
+
+ if name not in self.pretty:
+ self.pretty[name] = _SchemaToStruct(self.schemas[name],
+ seen, dent=dent).to_str(self._prettyPrintByName)
+
+ seen.pop()
+
+ return self.pretty[name]
+
+ def prettyPrintByName(self, name):
+ """Get pretty printed object prototype from the schema name.
+
+ Args:
+ name: string, Name of schema in the discovery document.
+
+ Returns:
+ string, A string that contains a prototype object with
+ comments that conforms to the given schema.
+ """
+ # Return with trailing comma and newline removed.
+ return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
+
+ @util.positional(2)
+ def _prettyPrintSchema(self, schema, seen=None, dent=0):
+ """Get pretty printed object prototype of schema.
+
+ Args:
+ schema: object, Parsed JSON schema.
+ seen: list of string, Names of schema already seen. Used to handle
+ recursive definitions.
+
+ Returns:
+ string, A string that contains a prototype object with
+ comments that conforms to the given schema.
+ """
+ if seen is None:
+ seen = []
+
+ return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
+
+ def prettyPrintSchema(self, schema):
+ """Get pretty printed object prototype of schema.
+
+ Args:
+ schema: object, Parsed JSON schema.
+
+ Returns:
+ string, A string that contains a prototype object with
+ comments that conforms to the given schema.
+ """
+ # Return with trailing comma and newline removed.
+ return self._prettyPrintSchema(schema, dent=1)[:-2]
+
+ def get(self, name, default=None):
+ """Get deserialized JSON schema from the schema name.
+
+ Args:
+ name: string, Schema name.
+ default: object, return value if name not found.
+ """
+ return self.schemas.get(name, default)
+
+
+class _SchemaToStruct(object):
+ """Convert schema to a prototype object."""
+
+ @util.positional(3)
+ def __init__(self, schema, seen, dent=0):
+ """Constructor.
+
+ Args:
+ schema: object, Parsed JSON schema.
+ seen: list, List of names of schema already seen while parsing. Used to
+ handle recursive definitions.
+ dent: int, Initial indentation depth.
+ """
+ # The result of this parsing kept as list of strings.
+ self.value = []
+
+ # The final value of the parsing.
+ self.string = None
+
+ # The parsed JSON schema.
+ self.schema = schema
+
+ # Indentation level.
+ self.dent = dent
+
+ # Method that when called returns a prototype object for the schema with
+ # the given name.
+ self.from_cache = None
+
+ # List of names of schema already seen while parsing.
+ self.seen = seen
+
+ def emit(self, text):
+ """Add text as a line to the output.
+
+ Args:
+ text: string, Text to output.
+ """
+ self.value.extend([" " * self.dent, text, '\n'])
+
+ def emitBegin(self, text):
+ """Add text to the output, but with no line terminator.
+
+ Args:
+ text: string, Text to output.
+ """
+ self.value.extend([" " * self.dent, text])
+
+ def emitEnd(self, text, comment):
+ """Add text and comment to the output with line terminator.
+
+ Args:
+ text: string, Text to output.
+ comment: string, Python comment.
+ """
+ if comment:
+ divider = '\n' + ' ' * (self.dent + 2) + '# '
+ lines = comment.splitlines()
+ lines = [x.rstrip() for x in lines]
+ comment = divider.join(lines)
+ self.value.extend([text, ' # ', comment, '\n'])
+ else:
+ self.value.extend([text, '\n'])
+
+ def indent(self):
+ """Increase indentation level."""
+ self.dent += 1
+
+ def undent(self):
+ """Decrease indentation level."""
+ self.dent -= 1
+
+ def _to_str_impl(self, schema):
+ """Prototype object based on the schema, in Python code with comments.
+
+ Args:
+ schema: object, Parsed JSON schema file.
+
+ Returns:
+ Prototype object based on the schema, in Python code with comments.
+ """
+ stype = schema.get('type')
+ if stype == 'object':
+ self.emitEnd('{', schema.get('description', ''))
+ self.indent()
+ if 'properties' in schema:
+ for pname, pschema in six.iteritems(schema.get('properties', {})):
+ self.emitBegin('"%s": ' % pname)
+ self._to_str_impl(pschema)
+ elif 'additionalProperties' in schema:
+ self.emitBegin('"a_key": ')
+ self._to_str_impl(schema['additionalProperties'])
+ self.undent()
+ self.emit('},')
+ elif '$ref' in schema:
+ schemaName = schema['$ref']
+ description = schema.get('description', '')
+ s = self.from_cache(schemaName, seen=self.seen)
+ parts = s.splitlines()
+ self.emitEnd(parts[0], description)
+ for line in parts[1:]:
+ self.emit(line.rstrip())
+ elif stype == 'boolean':
+ value = schema.get('default', 'True or False')
+ self.emitEnd('%s,' % str(value), schema.get('description', ''))
+ elif stype == 'string':
+ value = schema.get('default', 'A String')
+ self.emitEnd('"%s",' % str(value), schema.get('description', ''))
+ elif stype == 'integer':
+ value = schema.get('default', '42')
+ self.emitEnd('%s,' % str(value), schema.get('description', ''))
+ elif stype == 'number':
+ value = schema.get('default', '3.14')
+ self.emitEnd('%s,' % str(value), schema.get('description', ''))
+ elif stype == 'null':
+ self.emitEnd('None,', schema.get('description', ''))
+ elif stype == 'any':
+ self.emitEnd('"",', schema.get('description', ''))
+ elif stype == 'array':
+ self.emitEnd('[', schema.get('description'))
+ self.indent()
+ self.emitBegin('')
+ self._to_str_impl(schema['items'])
+ self.undent()
+ self.emit('],')
+ else:
+ self.emit('Unknown type! %s' % stype)
+ self.emitEnd('', '')
+
+ self.string = ''.join(self.value)
+ return self.string
+
+ def to_str(self, from_cache):
+ """Prototype object based on the schema, in Python code with comments.
+
+ Args:
+ from_cache: callable(name, seen), Callable that retrieves an object
+ prototype for a schema with the given name. Seen is a list of schema
+ names already seen as we recursively descend the schema definition.
+
+ Returns:
+ Prototype object based on the schema, in Python code with comments.
+ The lines of the code will all be properly indented.
+ """
+ self.from_cache = from_cache
+ return self._to_str_impl(self.schema)
diff --git a/webdrivers/chromedriver.exe b/webdrivers/chromedriver.exe
new file mode 100644
index 0000000..1f542be
Binary files /dev/null and b/webdrivers/chromedriver.exe differ
diff --git a/webdrivers/geckodriver.exe b/webdrivers/geckodriver.exe
new file mode 100644
index 0000000..1aba699
Binary files /dev/null and b/webdrivers/geckodriver.exe differ