If I understand your question correctly, the following should serve the purpose. I used the requests module instead of Selenium to make it more robust.
import requests
from bs4 import BeautifulSoup
url = 'https://www.ebmnews.com/2020/page/{}/'
current_page = 948
with requests.Session() as s:
s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
while current_page!=998: #highest page to traverse
r = s.get(url.format(current_page))
soup = BeautifulSoup(r.text,"html.parser")
for item in soup.select('article.listing-item'):
try:
post_author = item.select_one("i.post-author").get_text(strip=True)
except AttributeError: post_author = ""
try:
post_date = item.select_one("span.time > time").get_text(strip=True)
except AttributeError: post_date = ""
inner_link = item.select_one("h2.title > a").get("href")
res = s.get(inner_link)
sauce = BeautifulSoup(res.text,"html.parser")
title = sauce.select_one("span[itemprop='headline']").get_text(strip=True)
desc = ' '.join([item.get_text(strip=True) for item in sauce.select(".entry-content > p")])
print(post_author,post_date,title,desc)
current_page+=1
与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…